Dataset columns:
  repo_name   stringlengths   5 .. 100
  path        stringlengths   4 .. 375
  copies      stringclasses   991 values
  size        stringlengths   4 .. 7
  content     stringlengths   666 .. 1M
  license     stringclasses   15 values
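For reference, a minimal sketch of what a single record in this schema might look like as a Python dict; the field names come from the column listing above, while the values are illustrative placeholders, not rows taken from the data:

# Hypothetical example record matching the schema above; all values are
# placeholders chosen for illustration, not actual dataset rows.
record = {
    "repo_name": "example-user/example-repo",        # string, 5 .. 100 chars
    "path": "src/example_module.py",                  # string, 4 .. 375 chars
    "copies": "1",                                    # categorical, 991 observed values
    "size": "3492",                                   # file size in bytes, kept as a 4-7 digit string
    "content": "# full source text of the file ...",  # string, 666 chars .. 1 MB
    "license": "mit",                                 # one of 15 license classes
}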
qxsch/QXSConsolas
examples/CopyThat/copyThat/requests/packages/chardet/constants.py
3008
1335
######################## BEGIN LICENSE BLOCK ######################## # The Original Code is Mozilla Universal charset detector code. # # The Initial Developer of the Original Code is # Netscape Communications Corporation. # Portions created by the Initial Developer are Copyright (C) 2001 # the Initial Developer. All Rights Reserved. # # Contributor(s): # Mark Pilgrim - port to Python # Shy Shalom - original C code # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA # 02110-1301 USA ######################### END LICENSE BLOCK ######################### _debug = 0 eDetecting = 0 eFoundIt = 1 eNotMe = 2 eStart = 0 eError = 1 eItsMe = 2 SHORTCUT_THRESHOLD = 0.95
gpl-3.0
uclouvain/OSIS-Louvain
base/forms/utils/emptyfield.py
2
1459
############################################################################## # # OSIS stands for Open Student Information System. It's an application # designed to manage the core business of higher education institutions, # such as universities, faculties, institutes and professional schools. # The core business involves the administration of students, teachers, # courses, programs and so on. # # Copyright (C) 2015-2019 Université catholique de Louvain (http://www.uclouvain.be) # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # A copy of this license - GNU General Public License - is available # at the root of the source code of this program. If not, # see http://www.gnu.org/licenses/. # ############################################################################## from django import forms class EmptyField(forms.CharField): widget = forms.HiddenInput def __init__(self, label): super().__init__(label=label, required=False)
agpl-3.0
adrian-ionescu/apache-spark
python/pyspark/streaming/flume.py
8
6479
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import sys if sys.version >= "3": from io import BytesIO else: from StringIO import StringIO from py4j.protocol import Py4JJavaError from pyspark.storagelevel import StorageLevel from pyspark.serializers import PairDeserializer, NoOpSerializer, UTF8Deserializer, read_int from pyspark.streaming import DStream __all__ = ['FlumeUtils', 'utf8_decoder'] def utf8_decoder(s): """ Decode the unicode as UTF-8 """ if s is None: return None return s.decode('utf-8') class FlumeUtils(object): @staticmethod def createStream(ssc, hostname, port, storageLevel=StorageLevel.MEMORY_AND_DISK_2, enableDecompression=False, bodyDecoder=utf8_decoder): """ Create an input stream that pulls events from Flume. :param ssc: StreamingContext object :param hostname: Hostname of the slave machine to which the flume data will be sent :param port: Port of the slave machine to which the flume data will be sent :param storageLevel: Storage level to use for storing the received objects :param enableDecompression: Should netty server decompress input stream :param bodyDecoder: A function used to decode body (default is utf8_decoder) :return: A DStream object .. note:: Deprecated in 2.3.0. Flume support is deprecated as of Spark 2.3.0. See SPARK-22142. """ warnings.warn( "Deprecated in 2.3.0. Flume support is deprecated as of Spark 2.3.0. " "See SPARK-22142.", DeprecationWarning) jlevel = ssc._sc._getJavaStorageLevel(storageLevel) helper = FlumeUtils._get_helper(ssc._sc) jstream = helper.createStream(ssc._jssc, hostname, port, jlevel, enableDecompression) return FlumeUtils._toPythonDStream(ssc, jstream, bodyDecoder) @staticmethod def createPollingStream(ssc, addresses, storageLevel=StorageLevel.MEMORY_AND_DISK_2, maxBatchSize=1000, parallelism=5, bodyDecoder=utf8_decoder): """ Creates an input stream that is to be used with the Spark Sink deployed on a Flume agent. This stream will poll the sink for data and will pull events as they are available. :param ssc: StreamingContext object :param addresses: List of (host, port)s on which the Spark Sink is running. :param storageLevel: Storage level to use for storing the received objects :param maxBatchSize: The maximum number of events to be pulled from the Spark sink in a single RPC call :param parallelism: Number of concurrent requests this stream should send to the sink. Note that having a higher number of requests concurrently being pulled will result in this stream using more threads :param bodyDecoder: A function used to decode body (default is utf8_decoder) :return: A DStream object .. note:: Deprecated in 2.3.0. Flume support is deprecated as of Spark 2.3.0. See SPARK-22142. """ warnings.warn( "Deprecated in 2.3.0. Flume support is deprecated as of Spark 2.3.0. 
" "See SPARK-22142.", DeprecationWarning) jlevel = ssc._sc._getJavaStorageLevel(storageLevel) hosts = [] ports = [] for (host, port) in addresses: hosts.append(host) ports.append(port) helper = FlumeUtils._get_helper(ssc._sc) jstream = helper.createPollingStream( ssc._jssc, hosts, ports, jlevel, maxBatchSize, parallelism) return FlumeUtils._toPythonDStream(ssc, jstream, bodyDecoder) @staticmethod def _toPythonDStream(ssc, jstream, bodyDecoder): ser = PairDeserializer(NoOpSerializer(), NoOpSerializer()) stream = DStream(jstream, ssc, ser) def func(event): headersBytes = BytesIO(event[0]) if sys.version >= "3" else StringIO(event[0]) headers = {} strSer = UTF8Deserializer() for i in range(0, read_int(headersBytes)): key = strSer.loads(headersBytes) value = strSer.loads(headersBytes) headers[key] = value body = bodyDecoder(event[1]) return (headers, body) return stream.map(func) @staticmethod def _get_helper(sc): try: return sc._jvm.org.apache.spark.streaming.flume.FlumeUtilsPythonHelper() except TypeError as e: if str(e) == "'JavaPackage' object is not callable": FlumeUtils._printErrorMsg(sc) raise @staticmethod def _printErrorMsg(sc): print(""" ________________________________________________________________________________________________ Spark Streaming's Flume libraries not found in class path. Try one of the following. 1. Include the Flume library and its dependencies with in the spark-submit command as $ bin/spark-submit --packages org.apache.spark:spark-streaming-flume:%s ... 2. Download the JAR of the artifact from Maven Central http://search.maven.org/, Group Id = org.apache.spark, Artifact Id = spark-streaming-flume-assembly, Version = %s. Then, include the jar in the spark-submit command as $ bin/spark-submit --jars <spark-streaming-flume-assembly.jar> ... ________________________________________________________________________________________________ """ % (sc.version, sc.version))
apache-2.0
adam111316/SickGear
sickbeard/name_parser/regexes.py
3
17598
# Author: Nic Wolfe <[email protected]> # URL: http://code.google.com/p/sickbeard/ # # This file is part of SickGear. # # SickGear is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # SickGear is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with SickGear. If not, see <http://www.gnu.org/licenses/>. # all regexes are case insensitive normal_regexes = [ ('standard_repeat', # Show.Name.S01E02.S01E03.Source.Quality.Etc-Group # Show Name - S01E02 - S01E03 - S01E04 - Ep Name ''' ^(?P<series_name>.+?)[. _-]+ # Show_Name and separator s(?P<season_num>\d+)[. _-]* # S01 and optional separator e(?P<ep_num>\d+) # E02 and separator ([. _-]+s(?P=season_num)[. _-]* # S01 and optional separator e(?P<extra_ep_num>\d+))+ # E03/etc and separator [. _-]*((?P<extra_info>.+?) # Source_Quality_Etc- ((?<![. _-])(?<!WEB) # Make sure this is really the release group -(?P<release_group>[^- ]+))?)?$ # Group ''' ), ('fov_repeat', # Show.Name.1x02.1x03.Source.Quality.Etc-Group # Show Name - 1x02 - 1x03 - 1x04 - Ep Name ''' ^(?P<series_name>.+?)[. _-]+ # Show_Name and separator (?P<season_num>\d+)x # 1x (?P<ep_num>\d+) # 02 and separator ([. _-]+(?P=season_num)x # 1x (?P<extra_ep_num>\d+))+ # 03/etc and separator [. _-]*((?P<extra_info>.+?) # Source_Quality_Etc- ((?<![. _-])(?<!WEB) # Make sure this is really the release group -(?P<release_group>[^- ]+))?)?$ # Group ''' ), ('standard', # Show.Name.S01E02.Source.Quality.Etc-Group # Show Name - S01E02 - My Ep Name # Show.Name.S01.E03.My.Ep.Name # Show.Name.S01E02E03.Source.Quality.Etc-Group # Show Name - S01E02-03 - My Ep Name # Show.Name.S01.E02.E03 ''' ^((?P<series_name>.+?)[. _-]+)? # Show_Name and separator s(?P<season_num>\d+)[. _-]* # S01 and optional separator e(?P<ep_num>\d+) # E02 and separator (([. _-]*e|-) # linking e/- char (?P<extra_ep_num>(?!(1080|720|480)[pi])\d+))* # additional E03/etc [. _-]*((?P<extra_info>.+?) # Source_Quality_Etc- ((?<![. _-])(?<!WEB) # Make sure this is really the release group -(?P<release_group>[^- ]+))?)?$ # Group ''' ), ('fov', # Show_Name.1x02.Source_Quality_Etc-Group # Show Name - 1x02 - My Ep Name # Show_Name.1x02x03x04.Source_Quality_Etc-Group # Show Name - 1x02-03-04 - My Ep Name ''' ^((?P<series_name>.+?)[\[. _-]+)? # Show_Name and separator (?P<season_num>\d+)x # 1x (?P<ep_num>\d+) # 02 and separator (([. _-]*x|-) # linking x/- char (?P<extra_ep_num> (?!(1080|720|480)[pi])(?!(?<=x)264) # ignore obviously wrong multi-eps \d+))* # additional x03/etc [\]. _-]*((?P<extra_info>.+?) # Source_Quality_Etc- ((?<![. _-])(?<!WEB) # Make sure this is really the release group -(?P<release_group>[^- ]+))?)?$ # Group ''' ), ('scene_date_format', # Show.Name.2010.11.23.Source.Quality.Etc-Group # Show Name - 2010-11-23 - Ep Name ''' ^((?P<series_name>.+?)[. _-]+)? # Show_Name and separator (?P<air_year>\d{4})[. _-]+ # 2010 and separator (?P<air_month>\d{2})[. _-]+ # 11 and separator (?P<air_day>\d{2}) # 23 and separator [. _-]*((?P<extra_info>.+?) # Source_Quality_Etc- ((?<![. 
_-])(?<!WEB) # Make sure this is really the release group -(?P<release_group>[^- ]+))?)?$ # Group ''' ), ('stupid', # tpz-abc102 ''' (?P<release_group>.+?)-\w+?[\. ]? # tpz-abc (?!264) # don't count x264 (?P<season_num>\d{1,2}) # 1 (?P<ep_num>\d{2})$ # 02 ''' ), ('verbose', # Show Name Season 1 Episode 2 Ep Name ''' ^(?P<series_name>.+?)[. _-]+ # Show Name and separator season[. _-]+ # season and separator (?P<season_num>\d+)[. _-]+ # 1 episode[. _-]+ # episode and separator (?P<ep_num>\d+)[. _-]+ # 02 and separator (?P<extra_info>.+)$ # Source_Quality_Etc- ''' ), ('season_only', # Show.Name.S01.Source.Quality.Etc-Group ''' ^((?P<series_name>.+?)[. _-]+)? # Show_Name and separator s(eason[. _-])? # S01/Season 01 (?P<season_num>\d+)[. _-]* # S01 and optional separator [. _-]*((?P<extra_info>.+?) # Source_Quality_Etc- ((?<![. _-])(?<!WEB) # Make sure this is really the release group -(?P<release_group>[^- ]+))?)?$ # Group ''' ), ('no_season_multi_ep', # Show.Name.E02-03 # Show.Name.E02.2010 ''' ^((?P<series_name>.+?)[. _-]+)? # Show_Name and separator (e(p(isode)?)?|part|pt)[. _-]? # e, ep, episode, or part (?P<ep_num>(\d+|[ivx]+)) # first ep num ((([. _-]+(and|&|to)[. _-]+)|-) # and/&/to joiner (?P<extra_ep_num>(?!(1080|720|480)[pi])(\d+|[ivx]+))[. _-]) # second ep num ([. _-]*(?P<extra_info>.+?) # Source_Quality_Etc- ((?<![. _-])(?<!WEB) # Make sure this is really the release group -(?P<release_group>[^- ]+))?)?$ # Group ''' ), ('no_season_general', # Show.Name.E23.Test # Show.Name.Part.3.Source.Quality.Etc-Group # Show.Name.Part.1.and.Part.2.Blah-Group ''' ^((?P<series_name>.+?)[. _-]+)? # Show_Name and separator (e(p(isode)?)?|part|pt)[. _-]? # e, ep, episode, or part (?P<ep_num>(\d+|([ivx]+(?=[. _-])))) # first ep num ([. _-]+((and|&|to)[. _-]+)? # and/&/to joiner ((e(p(isode)?)?|part|pt)[. _-]?) # e, ep, episode, or part (?P<extra_ep_num>(?!(1080|720|480)[pi]) (\d+|([ivx]+(?=[. _-]))))[. _-])* # second ep num ([. _-]*(?P<extra_info>.+?) # Source_Quality_Etc- ((?<![. _-])(?<!WEB) # Make sure this is really the release group -(?P<release_group>[^- ]+))?)?$ # Group ''' ), ('bare', # Show.Name.102.Source.Quality.Etc-Group ''' ^(?P<series_name>.+?)[. _-]+ # Show_Name and separator (?P<season_num>\d{1,2}) # 1 (?P<ep_num>\d{2}) # 02 and separator ([. _-]+(?P<extra_info>(?!\d{3}[. _-]+)[^-]+) # Source_Quality_Etc- (-(?P<release_group>.+))?)?$ # Group ''' ), ('no_season', # Show Name - 01 - Ep Name # 01 - Ep Name ''' ^((?P<series_name>.+?)(?:[. _-]{2,}|[. _]))? # Show_Name and separator (?P<ep_num>\d{1,2}) # 01 (?:-(?P<extra_ep_num>\d{1,2}))* # 02 [. _-]+((?P<extra_info>.+?) # Source_Quality_Etc- ((?<![. _-])(?<!WEB) # Make sure this is really the release group -(?P<release_group>[^- ]+))?)?$ # Group ''' ), ] anime_regexes = [ ('anime_ultimate', ''' ^(?:\[(?P<release_group>.+?)\][ ._-]*) (?P<series_name>.+?)[ ._-]+ (?P<ep_ab_num>\d{1,3}) (-(?P<extra_ab_ep_num>\d{1,3}))?[ ._-]+? (?:v(?P<version>[0-9]))? (?:[\w\.]*) (?:(?:(?:[\[\(])(?P<extra_info>\d{3,4}[xp]?\d{0,4}[\.\w\s-]*)(?:[\]\)]))|(?:\d{3,4}[xp])) (?:[ ._]?\[(?P<crc>\w+)\])? .*? ''' ), ('anime_standard', # [Group Name] Show Name.13-14 # [Group Name] Show Name - 13-14 # Show Name 13-14 # [Group Name] Show Name.13 # [Group Name] Show Name - 13 # Show Name 13 ''' ^(\[(?P<release_group>.+?)\][ ._-]*)? # Release Group and separator (?P<series_name>.+?)[ ._-]+ # Show_Name and separator (?P<ep_ab_num>\d{1,3}) # E01 (-(?P<extra_ab_ep_num>\d{1,3}))? # E02 (v(?P<version>[0-9]))? 
# version [ ._-]+\[(?P<extra_info>\d{3,4}[xp]?\d{0,4}.+?)\] # Source_Quality_Etc- (\[(?P<crc>\w{8})\])? # CRC .*? # Separator and EOL '''), ('anime_standard_round', # [Stratos-Subs]_Infinite_Stratos_-_12_(1280x720_H.264_AAC)_[379759DB] # [ShinBunBu-Subs] Bleach - 02-03 (CX 1280x720 x264 AAC) ''' ^(\[(?P<release_group>.+?)\][ ._-]*)? # Release Group and separator (?P<series_name>.+?)[ ._-]+ # Show_Name and separator (?P<ep_ab_num>\d{1,3}) # E01 (-(?P<extra_ab_ep_num>\d{1,3}))? # E02 (v(?P<version>[0-9]))? # version [ ._-]+\((?P<extra_info>(CX[ ._-]?)?\d{3,4}[xp]?\d{0,4}[\.\w\s-]*)\) # Source_Quality_Etc- (\[(?P<crc>\w{8})\])? # CRC .*? # Separator and EOL '''), ('anime_slash', # [SGKK] Bleach 312v1 [720p/MKV] ''' ^(\[(?P<release_group>.+?)\][ ._-]*)? # Release Group and separator (?P<series_name>.+?)[ ._-]+ # Show_Name and separator (?P<ep_ab_num>\d{1,3}) # E01 (-(?P<extra_ab_ep_num>\d{1,3}))? # E02 (v(?P<version>[0-9]))? # version [ ._-]+\[(?P<extra_info>\d{3,4}p) # Source_Quality_Etc- (\[(?P<crc>\w{8})\])? # CRC .*? # Separator and EOL '''), ('anime_standard_codec', # [Ayako]_Infinite_Stratos_-_IS_-_07_[H264][720p][EB7838FC] # [Ayako] Infinite Stratos - IS - 07v2 [H264][720p][44419534] # [Ayako-Shikkaku] Oniichan no Koto Nanka Zenzen Suki Janain Dakara ne - 10 [LQ][h264][720p] [8853B21C] ''' ^(\[(?P<release_group>.+?)\][ ._-]*)? # Release Group and separator (?P<series_name>.+?)[ ._]* # Show_Name and separator ([ ._-]+-[ ._-]+[A-Z]+[ ._-]+)?[ ._-]+ # this will kick me in the butt one day (?P<ep_ab_num>\d{1,3}) # E01 (-(?P<extra_ab_ep_num>\d{1,3}))? # E02 (v(?P<version>[0-9]))? # version ([ ._-](\[\w{1,2}\])?\[[a-z][.]?\w{2,4}\])? # codec [ ._-]*\[(?P<extra_info>(\d{3,4}[xp]?\d{0,4})?[\.\w\s-]*)\] # Source_Quality_Etc- (\[(?P<crc>\w{8})\])? # CRC .*? # Separator and EOL '''), ('anime_and_normal', # Bleach - s16e03-04 - 313-314 # Bleach.s16e03-04.313-314 # Bleach s16e03e04 313-314 ''' ^(\[(?P<release_group>.+?)\][ ._-]*)? (?P<series_name>.+?)[ ._-]+ # start of string and series name and non optinal separator [sS](?P<season_num>\d+)[. _-]* # S01 and optional separator [eE](?P<ep_num>\d+) # epipisode E02 (([. _-]*e|-) # linking e/- char (?P<extra_ep_num>\d+))* # additional E03/etc ([ ._-]{2,}|[ ._]+) # if "-" is used to separate at least something else has to be # there(->{2,}) "s16e03-04-313-314" would make sens any way (?P<ep_ab_num>\d{1,3}) # absolute number (-(?P<extra_ab_ep_num>\d{1,3}))* # "-" as separator and anditional absolute number, all optinal (v(?P<version>[0-9]))? # the version e.g. "v2" .*? ''' ), ('anime_and_normal_x', # Bleach - s16e03-04 - 313-314 # Bleach.s16e03-04.313-314 # Bleach s16e03e04 313-314 ''' ^(?P<series_name>.+?)[ ._-]+ # start of string and series name and non optinal separator (?P<season_num>\d+)[. _-]* # S01 and optional separator [xX](?P<ep_num>\d+) # epipisode E02 (([. _-]*e|-) # linking e/- char (?P<extra_ep_num>\d+))* # additional E03/etc ([ ._-]{2,}|[ ._]+) # if "-" is used to separate at least something else has to be # there(->{2,}) "s16e03-04-313-314" would make sens any way (?P<ep_ab_num>\d{1,3}) # absolute number (-(?P<extra_ab_ep_num>\d{1,3}))* # "-" as separator and anditional absolute number, all optinal (v(?P<version>[0-9]))? # the version e.g. "v2" .*? 
''' ), ('anime_and_normal_reverse', # Bleach - 313-314 - s16e03-04 ''' ^(?P<series_name>.+?)[ ._-]+ # start of string and series name and non optinal separator (?P<ep_ab_num>\d{1,3}) # absolute number (-(?P<extra_ab_ep_num>\d{1,3}))* # "-" as separator and anditional absolute number, all optinal (v(?P<version>[0-9]))? # the version e.g. "v2" ([ ._-]{2,}|[ ._]+) # if "-" is used to separate at least something else has to be # there(->{2,}) "s16e03-04-313-314" would make sens any way [sS](?P<season_num>\d+)[. _-]* # S01 and optional separator [eE](?P<ep_num>\d+) # epipisode E02 (([. _-]*e|-) # linking e/- char (?P<extra_ep_num>\d+))* # additional E03/etc .*? ''' ), ('anime_and_normal_front', # 165.Naruto Shippuuden.s08e014 ''' ^(?P<ep_ab_num>\d{1,3}) # start of string and absolute number (-(?P<extra_ab_ep_num>\d{1,3}))* # "-" as separator and anditional absolute number, all optinal (v(?P<version>[0-9]))?[ ._-]+ # the version e.g. "v2" (?P<series_name>.+?)[ ._-]+ [sS](?P<season_num>\d+)[. _-]* # S01 and optional separator [eE](?P<ep_num>\d+) (([. _-]*e|-) # linking e/- char (?P<extra_ep_num>\d+))* # additional E03/etc .*? ''' ), ('anime_ep_name', ''' ^(?:\[(?P<release_group>.+?)\][ ._-]*) (?P<series_name>.+?)[ ._-]+ (?P<ep_ab_num>\d{1,3}) (-(?P<extra_ab_ep_num>\d{1,3}))*[ ._-]*? (?:v(?P<version>[0-9])[ ._-]+?)? (?:.+?[ ._-]+?)? \[(?P<extra_info>\w+)\][ ._-]? (?:\[(?P<crc>\w{8})\])? .*? ''' ), ('anime_bare', # One Piece - 102 # [ACX]_Wolf's_Spirit_001.mkv ''' ^(\[(?P<release_group>.+?)\][ ._-]*)? (?P<series_name>.+?)[ ._-]+ # Show_Name and separator (?<!H.)(?P<ep_ab_num>\d{3})(?!0p) # E01, while avoiding H.264 and 1080p from being matched (-(?P<extra_ab_ep_num>\d{3}))* # E02 (v(?P<version>[0-9]))? # v2 .*? # Separator and EOL '''), ('standard', # Show.Name.S01E02.Source.Quality.Etc-Group # Show Name - S01E02 - My Ep Name # Show.Name.S01.E03.My.Ep.Name # Show.Name.S01E02E03.Source.Quality.Etc-Group # Show Name - S01E02-03 - My Ep Name # Show.Name.S01.E02.E03 ''' ^((?P<series_name>.+?)[. _-]+)? # Show_Name and separator s(?P<season_num>\d+)[. _-]* # S01 and optional separator e(?P<ep_num>\d+) # E02 and separator (([. _-]*e|-) # linking e/- char (?P<extra_ep_num>(?!(1080|720|480)[pi])\d+))* # additional E03/etc [. _-]*((?P<extra_info>.+?) # Source_Quality_Etc- ((?<![. _-])(?<!WEB) # Make sure this is really the release group -(?P<release_group>[^- ]+))?)?$ # Group ''' ), ]
gpl-3.0
gjreda/pelican-plugins
rmd_reader/test_rmd_reader.py
21
5842
''' Created on Jan 25, 2016 @author: Aaron Kitzmiller <[email protected]? ''' import unittest, os, sys import shutil import logging import glob from pelican import Pelican from pelican.settings import read_settings logging.basicConfig(stream=sys.stderr, level=logging.DEBUG) class Test(unittest.TestCase): def setUp(self): try: import rpy2 import rmd_reader except Exception: raise unittest.SkipTest("rpy not installed. Will not test rmd_reader.") self.testtitle = 'rtest' self.cwd = os.path.dirname(os.path.abspath(__file__)) logging.debug(self.cwd) # Setup content dir and test rmd file self.contentdir = os.path.join(self.cwd,'test-content') logging.debug(self.contentdir) try: os.mkdir(self.contentdir) except Exception: pass self.contentfile = os.path.join(self.contentdir,'test.rmd') logging.debug(self.contentfile) self.testrmd = '''Title: %s Date: 2014-06-23 Let's make a simple plot about cars. ```{r} cars <- c(1, 3, 6, 4, 9) plot(cars) ``` ''' % self.testtitle with open(self.contentfile,'w') as f: f.write(self.testrmd) # Setup output dir self.outputdir = os.path.join(self.cwd,'test-output') logging.debug(self.outputdir) try: os.mkdir(self.outputdir) except Exception: pass self.figpath = 'images' def tearDown(self): logging.debug('CLEAN') if os.path.isdir(self.outputdir): shutil.rmtree(self.outputdir) if os.path.isdir(self.contentdir): shutil.rmtree(self.contentdir) def testKnitrSettings(self): settings = read_settings(path=None, override={ 'LOAD_CONTENT_CACHE': False, 'PATH': self.contentdir, 'OUTPUT_PATH': self.outputdir, 'RMD_READER_KNITR_OPTS_CHUNK': {'fig.path' : '%s/' % self.figpath}, 'RMD_READER_KNITR_OPTS_KNIT': {'progress' : True, 'verbose': True}, 'RMD_READER_RENAME_PLOT': 'disable', 'PLUGIN_PATHS': ['../'], 'PLUGINS': ['rmd_reader'], }) pelican = Pelican(settings=settings) pelican.run() outputfilename = os.path.join(self.outputdir,'%s.html' % self.testtitle) self.assertTrue(os.path.exists(outputfilename),'File %s was not created.' % outputfilename) imagesdir = os.path.join(self.outputdir, self.figpath) self.assertTrue(os.path.exists(imagesdir), 'figpath not created.') imagefile = os.path.join(imagesdir, 'unnamed-chunk') + '-1-1.png' logging.debug(imagefile) images = glob.glob('%s/*' % imagesdir) logging.debug(images) self.assertTrue(os.path.exists(imagefile), 'image correctly named.') self.assertTrue(len(images) == 1,'Contents of images dir is not correct: %s' % ','.join(images)) def testKnitrSettings2(self): settings = read_settings(path=None, override={ 'LOAD_CONTENT_CACHE': False, 'PATH': self.contentdir, 'OUTPUT_PATH': self.outputdir, 'RMD_READER_KNITR_OPTS_CHUNK': {'fig.path' : '%s/' % self.figpath}, 'RMD_READER_KNITR_OPTS_KNIT': {'progress' : True, 'verbose': True}, 'RMD_READER_RENAME_PLOT': 'chunklabel', 'PLUGIN_PATHS': ['../'], 'PLUGINS': ['rmd_reader'], }) pelican = Pelican(settings=settings) pelican.run() outputfilename = os.path.join(self.outputdir,'%s.html' % self.testtitle) self.assertTrue(os.path.exists(outputfilename),'File %s was not created.' 
% outputfilename) imagesdir = os.path.join(self.outputdir, self.figpath) self.assertTrue(os.path.exists(imagesdir), 'figpath not created.') imagefile = os.path.join(imagesdir, os.path.splitext(os.path.split(self.contentfile)[1])[0]) + '-1-1.png' logging.debug(imagefile) self.assertTrue(os.path.exists(imagefile), 'image correctly named.') images = glob.glob('%s/*' % imagesdir) logging.debug(images) self.assertTrue(len(images) == 1,'Contents of images dir is not correct: %s' % ','.join(images)) def testKnitrSettings3(self): settings = read_settings(path=None, override={ 'LOAD_CONTENT_CACHE': False, 'PATH': self.contentdir, 'OUTPUT_PATH': self.outputdir, 'RMD_READER_KNITR_OPTS_CHUNK': {'fig.path' : '%s/' % self.figpath}, 'RMD_READER_KNITR_OPTS_KNIT': {'progress' : True, 'verbose': True}, 'RMD_READER_RENAME_PLOT': 'directory', 'PLUGIN_PATHS': ['../'], 'PLUGINS': ['rmd_reader'], }) pelican = Pelican(settings=settings) pelican.run() outputfilename = os.path.join(self.outputdir,'%s.html' % self.testtitle) self.assertTrue(os.path.exists(outputfilename),'File %s was not created.' % outputfilename) imagesdir = os.path.join(self.outputdir, self.figpath) self.assertTrue(os.path.exists(imagesdir), 'figpath not created.') imagefile = os.path.join(imagesdir, os.path.splitext(os.path.split(self.contentfile)[1])[0]) + '-unnamed-chunk-1-1.png' logging.debug(imagefile) self.assertTrue(os.path.exists(imagefile), 'image correctly named.') images = glob.glob('%s/*' % imagesdir) logging.debug(images) self.assertTrue(len(images) == 1,'Contents of images dir is not correct: %s' % ','.join(images)) if __name__ == "__main__": #import sys;sys.argv = ['', 'Test.testName'] unittest.main()
agpl-3.0
magicrub/MissionPlanner
Lib/site-packages/numpy/fft/tests/test_helper.py
51
1817
#!"C:\Users\hog\Documents\Visual Studio 2010\Projects\ArdupilotMega\ArdupilotMega\bin\Debug\ipy.exe" # Copied from fftpack.helper by Pearu Peterson, October 2005 """ Test functions for fftpack.helper module """ from numpy.testing import * from numpy.fft import fftshift,ifftshift,fftfreq from numpy import pi def random(size): return rand(*size) class TestFFTShift(TestCase): def test_definition(self): x = [0,1,2,3,4,-4,-3,-2,-1] y = [-4,-3,-2,-1,0,1,2,3,4] assert_array_almost_equal(fftshift(x),y) assert_array_almost_equal(ifftshift(y),x) x = [0,1,2,3,4,-5,-4,-3,-2,-1] y = [-5,-4,-3,-2,-1,0,1,2,3,4] assert_array_almost_equal(fftshift(x),y) assert_array_almost_equal(ifftshift(y),x) def test_inverse(self): for n in [1,4,9,100,211]: x = random((n,)) assert_array_almost_equal(ifftshift(fftshift(x)),x) def test_axes_keyword(self): freqs = [[ 0, 1, 2], [ 3, 4, -4], [-3, -2, -1]] shifted = [[-1, -3, -2], [ 2, 0, 1], [-4, 3, 4]] assert_array_almost_equal(fftshift(freqs, axes=(0, 1)), shifted) assert_array_almost_equal(fftshift(freqs, axes=0), fftshift(freqs, axes=(0,))) assert_array_almost_equal(ifftshift(shifted, axes=(0, 1)), freqs) assert_array_almost_equal(ifftshift(shifted, axes=0), ifftshift(shifted, axes=(0,))) class TestFFTFreq(TestCase): def test_definition(self): x = [0,1,2,3,4,-4,-3,-2,-1] assert_array_almost_equal(9*fftfreq(9),x) assert_array_almost_equal(9*pi*fftfreq(9,pi),x) x = [0,1,2,3,4,-5,-4,-3,-2,-1] assert_array_almost_equal(10*fftfreq(10),x) assert_array_almost_equal(10*pi*fftfreq(10,pi),x) if __name__ == "__main__": run_module_suite()
gpl-3.0
gkunter/coquery
coquery/functionlist.py
1
4999
# -*- coding: utf-8 -*- """ functionlist.py is part of Coquery. Copyright (c) 2016-2018 Gero Kunter ([email protected]) Coquery is released under the terms of the GNU General Public License (v3). For details, see the file LICENSE that you should have received along with Coquery. If not, see <http://www.gnu.org/licenses/>. """ from __future__ import unicode_literals import pandas as pd import datetime import warnings import sys from . import options from .general import CoqObject from .errors import RegularExpressionError class FunctionList(CoqObject): def __init__(self, l=None, *args, **kwargs): super(FunctionList, self).__init__() self._exceptions = [] if l: self._list = l else: self._list = [] def lapply(self, df=None, session=None, manager=None): """ Apply all functions in the list to the data frame. """ # in order to allow zero frequencies for empty result tables, empty # data frames can be retained if a function demands it. This is # handled by keeping track of the drop_on_na attribute. As soon as # one function in the list wishes to retain a data frame that contains # NAs, the data frame will not be dropped. # This code only keeps track of the attributes. The actual dropping # takes place (or doesn't) in the summarize() method of the manager. if manager: # FIXME: The following check is super weird. The variable # `drop_on_na` is always either True or None, but never False. # This can't be right. if manager.drop_on_na is not None: drop_on_na = True else: drop_on_na = manager.drop_on_na else: drop_on_na = True self._exceptions = [] for fun in list(self._list): if any(col not in df.columns for col in fun.columns): self._list.remove(fun) continue if options.cfg.drop_on_na: drop_on_na = True else: drop_on_na = drop_on_na and fun.drop_on_na new_column = fun.get_id() try: if options.cfg.benchmark: print(fun.get_name()) then = datetime.datetime.now() for x in range(5000): val = fun.evaluate(df, **fun.kwargs) print(datetime.datetime.now() - then) else: val = fun.evaluate(df, **fun.kwargs) except Exception as e: # if an exception occurs, the error is logged, and an empty # column containing only NAs is added if isinstance(e, RegularExpressionError): error = e.error_message.strip() else: error = "Error during function call {}".format( fun.get_label(session)) self._exceptions.append((error, e, sys.exc_info())) val = pd.Series([None] * len(df), name=new_column) finally: # Functions can return either single columns or data frames. 
# Handle the function result accordingly: if fun.single_column: df[new_column] = val else: df = pd.concat([df, val], axis="columns") # tell the manager whether rows with NA will be dropped: if manager: manager.drop_on_na = drop_on_na return df def exceptions(self): return self._exceptions def get_list(self): return self._list def set_list(self, l): self._list = l def find_function(self, fun_id): for x in self._list: if x.get_id() == fun_id: return x return None def has_function(self, fun): for x in self._list: if x.get_id() == fun.get_id(): return True return False def add_function(self, fun): if not self.has_function(fun): self._list.append(fun) else: warnings.warn("Function duplicate not added: {}".format(fun)) def remove_function(self, fun): self._list.remove(fun) for x in self._list: if x.get_id() == fun.get_id(): self.remove_function(x) def replace_function(self, old, new): ix = self._list.index(old) self._list[ix] = new # update references to the replaced function: for i, func in enumerate(self._list[ix:]): func.columns = [new.get_id() if col == old.get_id() else col for col in func.columns] def __iter__(self): return self._list.__iter__() def __repr__(self): s = super(FunctionList, self).__repr__() return "{}({})".format( s, self._list.__repr__())
gpl-3.0
whn09/tensorflow
tensorflow/contrib/factorization/python/ops/gmm_ops_test.py
53
7705
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for gmm_ops.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import time import numpy as np from six.moves import xrange # pylint: disable=redefined-builtin from tensorflow.contrib.factorization.python.ops import gmm_ops from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import random_seed as random_seed_lib from tensorflow.python.ops import variables from tensorflow.python.platform import test from tensorflow.python.platform import tf_logging as logging class GmmOpsTest(test.TestCase): def setUp(self): self.num_examples = 1000 self.iterations = 40 self.seed = 4 random_seed_lib.set_random_seed(self.seed) np.random.seed(self.seed * 2) self.data, self.true_assignments = self.make_data(self.num_examples) # Generate more complicated data. self.centers = [[1, 1], [-1, 0.5], [2, 1]] self.more_data, self.more_true_assignments = self.make_data_from_centers( self.num_examples, self.centers) @staticmethod def make_data(num_vectors): """Generates 2-dimensional data centered on (2,2), (-1,-1). Args: num_vectors: number of training examples. Returns: A tuple containing the data as a numpy array and the cluster ids. """ vectors = [] classes = [] for _ in xrange(num_vectors): if np.random.random() > 0.5: vectors.append([np.random.normal(2.0, 0.6), np.random.normal(2.0, 0.9)]) classes.append(0) else: vectors.append( [np.random.normal(-1.0, 0.4), np.random.normal(-1.0, 0.5)]) classes.append(1) return np.asarray(vectors), classes @staticmethod def make_data_from_centers(num_vectors, centers): """Generates 2-dimensional data with random centers. Args: num_vectors: number of training examples. centers: a list of random 2-dimensional centers. Returns: A tuple containing the data as a numpy array and the cluster ids. 
""" vectors = [] classes = [] for _ in xrange(num_vectors): current_class = np.random.random_integers(0, len(centers) - 1) vectors.append([ np.random.normal(centers[current_class][0], np.random.random_sample()), np.random.normal(centers[current_class][1], np.random.random_sample()) ]) classes.append(current_class) return np.asarray(vectors), len(centers) def test_covariance(self): start_time = time.time() data = self.data.T np_cov = np.cov(data) logging.info('Numpy took %f', time.time() - start_time) start_time = time.time() with self.test_session() as sess: op = gmm_ops._covariance( constant_op.constant( data.T, dtype=dtypes.float32), False) op_diag = gmm_ops._covariance( constant_op.constant( data.T, dtype=dtypes.float32), True) variables.global_variables_initializer().run() tf_cov = sess.run(op) np.testing.assert_array_almost_equal(np_cov, tf_cov) logging.info('Tensorflow took %f', time.time() - start_time) tf_cov = sess.run(op_diag) np.testing.assert_array_almost_equal( np.diag(np_cov), np.ravel(tf_cov), decimal=5) def test_simple_cluster(self): """Tests that the clusters are correct.""" num_classes = 2 graph = ops.Graph() with graph.as_default() as g: g.seed = 5 with self.test_session() as sess: data = constant_op.constant(self.data, dtype=dtypes.float32) _, assignments, _, training_op = gmm_ops.gmm(data, 'random', num_classes, random_seed=self.seed) variables.global_variables_initializer().run() for _ in xrange(self.iterations): sess.run(training_op) assignments = sess.run(assignments) accuracy = np.mean( np.asarray(self.true_assignments) == np.squeeze(assignments)) logging.info('Accuracy: %f', accuracy) self.assertGreater(accuracy, 0.98) def testParams(self): """Tests that the params work as intended.""" num_classes = 2 with self.test_session() as sess: # Experiment 1. Update weights only. data = constant_op.constant(self.data, dtype=dtypes.float32) gmm_tool = gmm_ops.GmmAlgorithm([data], num_classes, [[3.0, 3.0], [0.0, 0.0]], 'w') training_ops = gmm_tool.training_ops() variables.global_variables_initializer().run() for _ in xrange(self.iterations): sess.run(training_ops) # Only the probability to each class is updated. alphas = sess.run(gmm_tool.alphas()) self.assertGreater(alphas[1], 0.6) means = sess.run(gmm_tool.clusters()) np.testing.assert_almost_equal( np.expand_dims([[3.0, 3.0], [0.0, 0.0]], 1), means) covs = sess.run(gmm_tool.covariances()) np.testing.assert_almost_equal(covs[0], covs[1]) # Experiment 2. Update means and covariances. gmm_tool = gmm_ops.GmmAlgorithm([data], num_classes, [[3.0, 3.0], [0.0, 0.0]], 'mc') training_ops = gmm_tool.training_ops() variables.global_variables_initializer().run() for _ in xrange(self.iterations): sess.run(training_ops) alphas = sess.run(gmm_tool.alphas()) self.assertAlmostEqual(alphas[0], alphas[1]) means = sess.run(gmm_tool.clusters()) np.testing.assert_almost_equal( np.expand_dims([[2.0, 2.0], [-1.0, -1.0]], 1), means, decimal=1) covs = sess.run(gmm_tool.covariances()) np.testing.assert_almost_equal( [[0.371111, -0.0050774], [-0.0050774, 0.8651744]], covs[0], decimal=4) np.testing.assert_almost_equal( [[0.146976, 0.0259463], [0.0259463, 0.2543971]], covs[1], decimal=4) # Experiment 3. Update covariances only. 
gmm_tool = gmm_ops.GmmAlgorithm([data], num_classes, [[-1.0, -1.0], [1.0, 1.0]], 'c') training_ops = gmm_tool.training_ops() variables.global_variables_initializer().run() for _ in xrange(self.iterations): sess.run(training_ops) alphas = sess.run(gmm_tool.alphas()) self.assertAlmostEqual(alphas[0], alphas[1]) means = sess.run(gmm_tool.clusters()) np.testing.assert_almost_equal( np.expand_dims([[-1.0, -1.0], [1.0, 1.0]], 1), means) covs = sess.run(gmm_tool.covariances()) np.testing.assert_almost_equal( [[0.1299582, 0.0435872], [0.0435872, 0.2558578]], covs[0], decimal=5) np.testing.assert_almost_equal( [[3.195385, 2.6989155], [2.6989155, 3.3881593]], covs[1], decimal=5) if __name__ == '__main__': test.main()
apache-2.0
DaveTCode/PlatoonNiNoKuni
house_ai.py
1
3684
import random from campaign import HANDS_IN_CAMPAIGN from ai import GameAI class HouseAI(GameAI): def __init__(self): GameAI.__init__(self, "Implementation of the house AI") def distribute_hands(self, campaign): ''' Replicate the game AI method of allocating out cards to hands. Done in three steps: 1) Allocate out all bishops then kings, left to right 2) Allocate out any number cards to ensure that each hand has at least one card and the rest at random ensuring that if a hand contains a number card then all cards to it's right also do. 3) Allocate out jokers from left to right. ''' self._allocate_specials(campaign) self._allocate_numbers(campaign) self._allocate_jokers(campaign) def play_hand(self, campaign, opponent_campaign): raise NotImplementedError("Doesn't work yet. Sorry.") def _allocate_specials(self, campaign): ''' Allocate out the special cards that are in the campaign. This is deterministic and is done by allocating all bishops left to right followed by all kings in the same manner. The rule is that each hand can have at most one special card. ''' for special_type in ["bishop", "king"]: for card in [card for card in campaign.cards if card.name.lower() == special_type]: campaign.add_card_to_hand(campaign.next_hand_w_no_card_type("bishop", "king"), card) def _allocate_numbers(self, campaign): ''' Allocate number cards (including Queen and Jack) out between the hands. This is based on a pseudo random algorithm and is probably not quite what the computer does. Each hand must have one card, so out of the number cards, one card is allocated to each empty hand. After that the remaining cards are spread between a random number of piles ensuring that any pile containing a number must also be followed by piles containing numbers. ''' # Randomize the number cards number_cards = sorted([card for card in campaign.cards if not card.is_special()], key=lambda *args: random.random()) # Allocate one card per empty pile first_empty_hand = campaign.next_empty_hand() for hand_index in range(first_empty_hand, HANDS_IN_CAMPAIGN): card = number_cards.pop() campaign.add_card_to_hand(hand_index, card) # For the remaining numbers (if any) we decide how many piles to # distribute over (at random) and then allocate from right to left. piles_to_use = random.randint(1, min(len(number_cards), HANDS_IN_CAMPAIGN)) first_pile_to_use = HANDS_IN_CAMPAIGN - piles_to_use for card in number_cards: hand_index = (number_cards.index(card) % piles_to_use) + first_pile_to_use campaign.add_card_to_hand(hand_index, card) def _allocate_jokers(self, campaign): ''' Allocate out all jokers that are in the campaign. This is deterministic although it is based on incomplete information. Jokers are allocated left to right after all other cards have been allocated. It's possible that this isn't what the computer does. More research required @@@TODO. ''' for card in [card for card in campaign.cards if card.name.lower() == "joker"]: campaign.add_card_to_hand(campaign.next_hand_w_no_card_type("joker"), card)
mit
menardorama/ReadyNAS-Add-ons
headphones-1.0.0/files/apps/headphones/headphones/db.py
12
3820
# This file is part of Headphones. # # Headphones is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Headphones is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Headphones. If not, see <http://www.gnu.org/licenses/>. ##################################### ## Stolen from Sick-Beard's db.py ## ##################################### from __future__ import with_statement import os import sqlite3 import headphones from headphones import logger def dbFilename(filename="headphones.db"): return os.path.join(headphones.DATA_DIR, filename) def getCacheSize(): #this will protect against typecasting problems produced by empty string and None settings if not headphones.CONFIG.CACHE_SIZEMB: #sqlite will work with this (very slowly) return 0 return int(headphones.CONFIG.CACHE_SIZEMB) class DBConnection: def __init__(self, filename="headphones.db"): self.filename = filename self.connection = sqlite3.connect(dbFilename(filename), timeout=20) #don't wait for the disk to finish writing self.connection.execute("PRAGMA synchronous = OFF") #journal disabled since we never do rollbacks self.connection.execute("PRAGMA journal_mode = %s" % headphones.CONFIG.JOURNAL_MODE) #64mb of cache memory,probably need to make it user configurable self.connection.execute("PRAGMA cache_size=-%s" % (getCacheSize() * 1024)) self.connection.row_factory = sqlite3.Row def action(self, query, args=None): if query is None: return sqlResult = None try: with self.connection as c: if args is None: sqlResult = c.execute(query) else: sqlResult = c.execute(query, args) except sqlite3.OperationalError, e: if "unable to open database file" in e.message or "database is locked" in e.message: logger.warn('Database Error: %s', e) else: logger.error('Database error: %s', e) raise except sqlite3.DatabaseError, e: logger.error('Fatal Error executing %s :: %s', query, e) raise return sqlResult def select(self, query, args=None): sqlResults = self.action(query, args).fetchall() if sqlResults is None or sqlResults == [None]: return [] return sqlResults def upsert(self, tableName, valueDict, keyDict): changesBefore = self.connection.total_changes genParams = lambda myDict: [x + " = ?" for x in myDict.keys()] update_query = "UPDATE " + tableName + " SET " + ", ".join(genParams(valueDict)) + " WHERE " + " AND ".join(genParams(keyDict)) self.action(update_query, valueDict.values() + keyDict.values()) if self.connection.total_changes == changesBefore: insert_query = ( "INSERT INTO " + tableName + " (" + ", ".join(valueDict.keys() + keyDict.keys()) + ")" + " VALUES (" + ", ".join(["?"] * len(valueDict.keys() + keyDict.keys())) + ")" ) try: self.action(insert_query, valueDict.values() + keyDict.values()) except sqlite3.IntegrityError: logger.info('Queries failed: %s and %s', update_query, insert_query)
gpl-2.0
bygreencn/DIGITS
plugins/data/imageGradients/digitsDataPluginImageGradients/data.py
3
3492
# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. from __future__ import absolute_import from digits.utils import subclass, override, constants from digits.extensions.data.interface import DataIngestionInterface from .forms import DatasetForm, InferenceForm import numpy as np import os TEMPLATE = "templates/template.html" INFERENCE_TEMPLATE = "templates/inference_template.html" @subclass class DataIngestion(DataIngestionInterface): """ A data ingestion extension for an image gradient dataset """ def __init__(self, is_inference_db=False, **kwargs): super(DataIngestion, self).__init__(**kwargs) self.userdata['is_inference_db'] = is_inference_db # Used to calculate the gradients later self.yy, self.xx = np.mgrid[:self.image_height, :self.image_width].astype('float') @override def encode_entry(self, entry): xslope, yslope = entry label = np.array([xslope, yslope]) a = xslope * 255 / self.image_width b = yslope * 255 / self.image_height image = a * (self.xx - self.image_width/2) + b * (self.yy - self.image_height/2) + 127.5 image = image.astype('uint8') # convert to 3D tensors image = image[np.newaxis, ...] label = label[np.newaxis, np.newaxis, ...] return image, label @staticmethod @override def get_category(): return "Images" @staticmethod @override def get_id(): return "image-gradients" @staticmethod @override def get_dataset_form(): return DatasetForm() @staticmethod @override def get_dataset_template(form): """ parameters: - form: form returned by get_dataset_form(). This may be populated with values if the job was cloned return: - (template, context) tuple - template is a Jinja template to use for rendering dataset creation options - context is a dictionary of context variables to use for rendering the form """ extension_dir = os.path.dirname(os.path.abspath(__file__)) template = open(os.path.join(extension_dir, TEMPLATE), "r").read() context = {'form': form} return (template, context) @override def get_inference_form(self): return InferenceForm() @staticmethod @override def get_inference_template(form): extension_dir = os.path.dirname(os.path.abspath(__file__)) template = open(os.path.join(extension_dir, INFERENCE_TEMPLATE), "r").read() context = {'form': form} return (template, context) @staticmethod @override def get_title(): return "Gradients" @override def itemize_entries(self, stage): count = 0 if self.userdata['is_inference_db']: if stage == constants.TEST_DB: if self.test_image_count: count = self.test_image_count else: return [(self.gradient_x, self.gradient_y)] else: if stage == constants.TRAIN_DB: count = self.train_image_count elif stage == constants.VAL_DB: count = self.val_image_count elif stage == constants.TEST_DB: count = self.test_image_count return [np.random.random_sample(2) - 0.5 for i in xrange(count)] if count > 0 else []
bsd-3-clause
denisov-vlad/redash
tests/test_models.py
3
27473
import calendar import datetime from unittest import TestCase import pytz from dateutil.parser import parse as date_parse from tests import BaseTestCase from redash import models, redis_connection from redash.models import db, types from redash.utils import gen_query_hash, utcnow class DashboardTest(BaseTestCase): def test_appends_suffix_to_slug_when_duplicate(self): d1 = self.factory.create_dashboard() db.session.flush() self.assertEqual(d1.slug, "test") d2 = self.factory.create_dashboard(user=d1.user) db.session.flush() self.assertNotEqual(d1.slug, d2.slug) d3 = self.factory.create_dashboard(user=d1.user) db.session.flush() self.assertNotEqual(d1.slug, d3.slug) self.assertNotEqual(d2.slug, d3.slug) class ShouldScheduleNextTest(TestCase): def test_interval_schedule_that_needs_reschedule(self): now = utcnow() two_hours_ago = now - datetime.timedelta(hours=2) self.assertTrue(models.should_schedule_next(two_hours_ago, now, "3600")) def test_interval_schedule_that_doesnt_need_reschedule(self): now = utcnow() half_an_hour_ago = now - datetime.timedelta(minutes=30) self.assertFalse(models.should_schedule_next(half_an_hour_ago, now, "3600")) def test_exact_time_that_needs_reschedule(self): now = utcnow() yesterday = now - datetime.timedelta(days=1) scheduled_datetime = now - datetime.timedelta(hours=3) scheduled_time = "{:02d}:00".format(scheduled_datetime.hour) self.assertTrue( models.should_schedule_next(yesterday, now, "86400", scheduled_time) ) def test_exact_time_that_doesnt_need_reschedule(self): now = date_parse("2015-10-16 20:10") yesterday = date_parse("2015-10-15 23:07") schedule = "23:00" self.assertFalse(models.should_schedule_next(yesterday, now, "86400", schedule)) def test_exact_time_with_day_change(self): now = utcnow().replace(hour=0, minute=1) previous = (now - datetime.timedelta(days=2)).replace(hour=23, minute=59) schedule = "23:59".format(now.hour + 3) self.assertTrue(models.should_schedule_next(previous, now, "86400", schedule)) def test_exact_time_every_x_days_that_needs_reschedule(self): now = utcnow() four_days_ago = now - datetime.timedelta(days=4) three_day_interval = "259200" scheduled_datetime = now - datetime.timedelta(hours=3) scheduled_time = "{:02d}:00".format(scheduled_datetime.hour) self.assertTrue( models.should_schedule_next( four_days_ago, now, three_day_interval, scheduled_time ) ) def test_exact_time_every_x_days_that_doesnt_need_reschedule(self): now = utcnow() four_days_ago = now - datetime.timedelta(days=2) three_day_interval = "259200" scheduled_datetime = now - datetime.timedelta(hours=3) scheduled_time = "{:02d}:00".format(scheduled_datetime.hour) self.assertFalse( models.should_schedule_next( four_days_ago, now, three_day_interval, scheduled_time ) ) def test_exact_time_every_x_days_with_day_change(self): now = utcnow().replace(hour=23, minute=59) previous = (now - datetime.timedelta(days=2)).replace(hour=0, minute=1) schedule = "23:58" three_day_interval = "259200" self.assertTrue( models.should_schedule_next(previous, now, three_day_interval, schedule) ) def test_exact_time_every_x_weeks_that_needs_reschedule(self): # Setup: # # 1) The query should run every 3 weeks on Tuesday # 2) The last time it ran was 3 weeks ago from this week's Thursday # 3) It is now Wednesday of this week # # Expectation: Even though less than 3 weeks have passed since the # last run 3 weeks ago on Thursday, it's overdue since # it should be running on Tuesdays. 
this_thursday = utcnow() + datetime.timedelta( days=list(calendar.day_name).index("Thursday") - utcnow().weekday() ) three_weeks_ago = this_thursday - datetime.timedelta(weeks=3) now = this_thursday - datetime.timedelta(days=1) three_week_interval = "1814400" scheduled_datetime = now - datetime.timedelta(hours=3) scheduled_time = "{:02d}:00".format(scheduled_datetime.hour) self.assertTrue( models.should_schedule_next( three_weeks_ago, now, three_week_interval, scheduled_time, "Tuesday" ) ) def test_exact_time_every_x_weeks_that_doesnt_need_reschedule(self): # Setup: # # 1) The query should run every 3 weeks on Thurday # 2) The last time it ran was 3 weeks ago from this week's Tuesday # 3) It is now Wednesday of this week # # Expectation: Even though more than 3 weeks have passed since the # last run 3 weeks ago on Tuesday, it's not overdue since # it should be running on Thursdays. this_tuesday = utcnow() + datetime.timedelta( days=list(calendar.day_name).index("Tuesday") - utcnow().weekday() ) three_weeks_ago = this_tuesday - datetime.timedelta(weeks=3) now = this_tuesday + datetime.timedelta(days=1) three_week_interval = "1814400" scheduled_datetime = now - datetime.timedelta(hours=3) scheduled_time = "{:02d}:00".format(scheduled_datetime.hour) self.assertFalse( models.should_schedule_next( three_weeks_ago, now, three_week_interval, scheduled_time, "Thursday" ) ) def test_backoff(self): now = utcnow() two_hours_ago = now - datetime.timedelta(hours=2) self.assertTrue( models.should_schedule_next(two_hours_ago, now, "3600", failures=5) ) self.assertFalse( models.should_schedule_next(two_hours_ago, now, "3600", failures=10) ) def test_next_iteration_overflow(self): now = utcnow() two_hours_ago = now - datetime.timedelta(hours=2) self.assertFalse( models.should_schedule_next(two_hours_ago, now, "3600", failures=32) ) class QueryOutdatedQueriesTest(BaseTestCase): def schedule(self, **kwargs): schedule = {"interval": None, "time": None, "until": None, "day_of_week": None} schedule.update(**kwargs) return schedule def create_scheduled_query(self, **kwargs): return self.factory.create_query(schedule=self.schedule(**kwargs)) def fake_previous_execution(self, query, **kwargs): retrieved_at = utcnow() - datetime.timedelta(**kwargs) query_result = self.factory.create_query_result( retrieved_at=retrieved_at, query_text=query.query_text, query_hash=query.query_hash, ) query.latest_query_data = query_result # TODO: this test can be refactored to use mock version of should_schedule_next to simplify it. 
def test_outdated_queries_skips_unscheduled_queries(self): query = self.create_scheduled_query() query_with_none = self.factory.create_query(schedule=None) queries = models.Query.outdated_queries() self.assertNotIn(query, queries) self.assertNotIn(query_with_none, queries) def test_outdated_queries_works_with_ttl_based_schedule(self): query = self.create_scheduled_query(interval="3600") self.fake_previous_execution(query, hours=2) queries = models.Query.outdated_queries() self.assertIn(query, queries) def test_outdated_queries_works_scheduled_queries_tracker(self): query = self.create_scheduled_query(interval="3600") self.fake_previous_execution(query, hours=2) models.scheduled_queries_executions.update(query.id) queries = models.Query.outdated_queries() self.assertNotIn(query, queries) def test_skips_fresh_queries(self): query = self.create_scheduled_query(interval="3600") self.fake_previous_execution(query, minutes=30) queries = models.Query.outdated_queries() self.assertNotIn(query, queries) def test_outdated_queries_works_with_specific_time_schedule(self): half_an_hour_ago = utcnow() - datetime.timedelta(minutes=30) query = self.create_scheduled_query(interval="86400", time=half_an_hour_ago.strftime("%H:%M")) query_result = self.factory.create_query_result( query=query.query_text, retrieved_at=half_an_hour_ago - datetime.timedelta(days=1), ) query.latest_query_data = query_result queries = models.Query.outdated_queries() self.assertIn(query, queries) def test_enqueues_query_only_once(self): """ Only one query per data source with the same text will be reported by Query.outdated_queries(). """ query = self.create_scheduled_query(interval="60") query2 = self.factory.create_query( schedule=self.schedule(interval="60"), query_text=query.query_text, query_hash=query.query_hash, ) self.fake_previous_execution(query, minutes=10) self.fake_previous_execution(query2, minutes=10) self.assertEqual(list(models.Query.outdated_queries()), [query2]) def test_enqueues_query_with_correct_data_source(self): """ Queries from different data sources will be reported by Query.outdated_queries() even if they have the same query text. """ query = self.factory.create_query( schedule=self.schedule(interval="60"), data_source=self.factory.create_data_source(), ) query2 = self.factory.create_query( schedule=self.schedule(interval="60"), query_text=query.query_text, query_hash=query.query_hash, ) self.fake_previous_execution(query, minutes=10) self.fake_previous_execution(query2, minutes=10) outdated_queries = models.Query.outdated_queries() self.assertEqual(len(outdated_queries), 2) self.assertIn(query, outdated_queries) self.assertIn(query2, outdated_queries) def test_enqueues_only_for_relevant_data_source(self): """ If multiple queries with the same text exist, only ones that are scheduled to be refreshed are reported by Query.outdated_queries(). """ query = self.create_scheduled_query(interval="60") query2 = self.factory.create_query( schedule=self.schedule(interval="3600"), query_text=query.query_text, query_hash=query.query_hash, ) self.fake_previous_execution(query, minutes=10) self.fake_previous_execution(query2, minutes=10) self.assertEqual(list(models.Query.outdated_queries()), [query]) def test_failure_extends_schedule(self): """ Execution failures recorded for a query result in exponential backoff for scheduling future execution. 
""" query = self.factory.create_query( schedule=self.schedule(interval="60"), schedule_failures=4, ) self.fake_previous_execution(query, minutes=16) self.assertEqual(list(models.Query.outdated_queries()), []) self.fake_previous_execution(query, minutes=17) self.assertEqual(list(models.Query.outdated_queries()), [query]) def test_schedule_until_after(self): """ Queries with non-null ``schedule['until']`` are not reported by Query.outdated_queries() after the given time is past. """ one_day_ago = (utcnow() - datetime.timedelta(days=1)).strftime("%Y-%m-%d") query = self.create_scheduled_query(interval="3600", until=one_day_ago) self.fake_previous_execution(query, hours=2) queries = models.Query.outdated_queries() self.assertNotIn(query, queries) def test_schedule_until_before(self): """ Queries with non-null ``schedule['until']`` are reported by Query.outdated_queries() before the given time is past. """ one_day_from_now = (utcnow() + datetime.timedelta(days=1)).strftime("%Y-%m-%d") query = self.create_scheduled_query(interval="3600", until=one_day_from_now) self.fake_previous_execution(query, hours=2) queries = models.Query.outdated_queries() self.assertIn(query, queries) def test_skips_and_disables_faulty_queries(self): faulty_query = self.create_scheduled_query(until="pigs fly") valid_query = self.create_scheduled_query(interval="60") self.fake_previous_execution(valid_query, minutes=10) queries = models.Query.outdated_queries() self.assertEqual(list(models.Query.outdated_queries()), [valid_query]) self.assertTrue(faulty_query.schedule.get("disabled")) def test_skips_disabled_schedules(self): query = self.create_scheduled_query(disabled=True) queries = models.Query.outdated_queries() self.assertNotIn(query, queries) class QueryArchiveTest(BaseTestCase): def test_archive_query_sets_flag(self): query = self.factory.create_query() db.session.flush() query.archive() self.assertEqual(query.is_archived, True) def test_archived_query_doesnt_return_in_all(self): query = self.factory.create_query( schedule={"interval": "1", "until": None, "time": None, "day_of_week": None} ) yesterday = utcnow() - datetime.timedelta(days=1) query_result = models.QueryResult.store_result( query.org_id, query.data_source, query.query_hash, query.query_text, "1", 123, yesterday, ) query.latest_query_data = query_result groups = list(models.Group.query.filter(models.Group.id.in_(query.groups))) self.assertIn(query, list(models.Query.all_queries([g.id for g in groups]))) self.assertIn(query, models.Query.outdated_queries()) db.session.flush() query.archive() self.assertNotIn(query, list(models.Query.all_queries([g.id for g in groups]))) self.assertNotIn(query, models.Query.outdated_queries()) def test_removes_associated_widgets_from_dashboards(self): widget = self.factory.create_widget() query = widget.visualization.query_rel db.session.commit() query.archive() db.session.flush() self.assertEqual(models.Widget.query.get(widget.id), None) def test_removes_scheduling(self): query = self.factory.create_query( schedule={"interval": "1", "until": None, "time": None, "day_of_week": None} ) query.archive() self.assertIsNone(query.schedule) def test_deletes_alerts(self): subscription = self.factory.create_alert_subscription() query = subscription.alert.query_rel db.session.commit() query.archive() db.session.flush() self.assertEqual(models.Alert.query.get(subscription.alert.id), None) self.assertEqual(models.AlertSubscription.query.get(subscription.id), None) class TestUnusedQueryResults(BaseTestCase): def 
test_returns_only_unused_query_results(self): two_weeks_ago = utcnow() - datetime.timedelta(days=14) qr = self.factory.create_query_result() self.factory.create_query(latest_query_data=qr) db.session.flush() unused_qr = self.factory.create_query_result(retrieved_at=two_weeks_ago) self.assertIn(unused_qr, list(models.QueryResult.unused())) self.assertNotIn(qr, list(models.QueryResult.unused())) def test_returns_only_over_a_week_old_results(self): two_weeks_ago = utcnow() - datetime.timedelta(days=14) unused_qr = self.factory.create_query_result(retrieved_at=two_weeks_ago) db.session.flush() new_unused_qr = self.factory.create_query_result() self.assertIn(unused_qr, list(models.QueryResult.unused())) self.assertNotIn(new_unused_qr, list(models.QueryResult.unused())) class TestQueryAll(BaseTestCase): def test_returns_only_queries_in_given_groups(self): ds1 = self.factory.create_data_source() ds2 = self.factory.create_data_source() group1 = models.Group(name="g1", org=ds1.org, permissions=["create", "view"]) group2 = models.Group(name="g2", org=ds1.org, permissions=["create", "view"]) q1 = self.factory.create_query(data_source=ds1) q2 = self.factory.create_query(data_source=ds2) db.session.add_all( [ ds1, ds2, group1, group2, q1, q2, models.DataSourceGroup(group=group1, data_source=ds1), models.DataSourceGroup(group=group2, data_source=ds2), ] ) db.session.flush() self.assertIn(q1, list(models.Query.all_queries([group1.id]))) self.assertNotIn(q2, list(models.Query.all_queries([group1.id]))) self.assertIn(q1, list(models.Query.all_queries([group1.id, group2.id]))) self.assertIn(q2, list(models.Query.all_queries([group1.id, group2.id]))) def test_skips_drafts(self): q = self.factory.create_query(is_draft=True) self.assertNotIn(q, models.Query.all_queries([self.factory.default_group.id])) def test_includes_drafts_of_given_user(self): q = self.factory.create_query(is_draft=True) self.assertIn( q, models.Query.all_queries( [self.factory.default_group.id], user_id=q.user_id ), ) def test_order_by_relationship(self): u1 = self.factory.create_user(name="alice") u2 = self.factory.create_user(name="bob") self.factory.create_query(user=u1) self.factory.create_query(user=u2) db.session.commit() # have to reset the order here with None since all_queries orders by # created_at by default base = models.Query.all_queries([self.factory.default_group.id]).order_by(None) qs1 = base.order_by(models.User.name) self.assertEqual(["alice", "bob"], [q.user.name for q in qs1]) qs2 = base.order_by(models.User.name.desc()) self.assertEqual(["bob", "alice"], [q.user.name for q in qs2]) def test_update_query_hash_basesql_with_options(self): ds = self.factory.create_data_source( group=self.factory.org.default_group, type="pg" ) query = self.factory.create_query(query_text="SELECT 2", data_source=ds) query.options = {"apply_auto_limit": True} origin_hash = query.query_hash query.update_query_hash() self.assertNotEqual(origin_hash, query.query_hash) def test_update_query_hash_basesql_no_options(self): ds = self.factory.create_data_source( group=self.factory.org.default_group, type="pg" ) query = self.factory.create_query(query_text="SELECT 2", data_source=ds) query.options = {} origin_hash = query.query_hash query.update_query_hash() self.assertEqual(origin_hash, query.query_hash) def test_update_query_hash_non_basesql(self): ds = self.factory.create_data_source( group=self.factory.org.default_group, type="prometheus" ) query = self.factory.create_query(query_text="SELECT 2", data_source=ds) query.options = 
{"apply_auto_limit": True} origin_hash = query.query_hash query.update_query_hash() self.assertEqual(origin_hash, query.query_hash) class TestGroup(BaseTestCase): def test_returns_groups_with_specified_names(self): org1 = self.factory.create_org() org2 = self.factory.create_org() matching_group1 = models.Group(id=999, name="g1", org=org1) matching_group2 = models.Group(id=888, name="g2", org=org1) non_matching_group = models.Group(id=777, name="g1", org=org2) groups = models.Group.find_by_name(org1, ["g1", "g2"]) self.assertIn(matching_group1, groups) self.assertIn(matching_group2, groups) self.assertNotIn(non_matching_group, groups) def test_returns_no_groups(self): org1 = self.factory.create_org() models.Group(id=999, name="g1", org=org1) self.assertEqual([], models.Group.find_by_name(org1, ["non-existing"])) class TestQueryResultStoreResult(BaseTestCase): def setUp(self): super(TestQueryResultStoreResult, self).setUp() self.data_source = self.factory.data_source self.query = "SELECT 1" self.query_hash = gen_query_hash(self.query) self.runtime = 123 self.utcnow = utcnow() self.data = '{"a": 1}' def test_stores_the_result(self): query_result = models.QueryResult.store_result( self.data_source.org_id, self.data_source, self.query_hash, self.query, self.data, self.runtime, self.utcnow, ) self.assertEqual(query_result._data, self.data) self.assertEqual(query_result.runtime, self.runtime) self.assertEqual(query_result.retrieved_at, self.utcnow) self.assertEqual(query_result.query_text, self.query) self.assertEqual(query_result.query_hash, self.query_hash) self.assertEqual(query_result.data_source, self.data_source) class TestEvents(BaseTestCase): def raw_event(self): timestamp = 1411778709.791 user = self.factory.user created_at = datetime.datetime.utcfromtimestamp(timestamp) db.session.flush() raw_event = { "action": "view", "timestamp": timestamp, "object_type": "dashboard", "user_id": user.id, "object_id": 1, "org_id": 1, } return raw_event, user, created_at def test_records_event(self): raw_event, user, created_at = self.raw_event() event = models.Event.record(raw_event) db.session.flush() self.assertEqual(event.user, user) self.assertEqual(event.action, "view") self.assertEqual(event.object_type, "dashboard") self.assertEqual(event.object_id, 1) self.assertEqual(event.created_at, created_at) def test_records_additional_properties(self): raw_event, _, _ = self.raw_event() additional_properties = {"test": 1, "test2": 2, "whatever": "abc"} raw_event.update(additional_properties) event = models.Event.record(raw_event) self.assertDictEqual(event.additional_properties, additional_properties) def _set_up_dashboard_test(d): d.g1 = d.factory.create_group(name="First", permissions=["create", "view"]) d.g2 = d.factory.create_group(name="Second", permissions=["create", "view"]) d.ds1 = d.factory.create_data_source() d.ds2 = d.factory.create_data_source() db.session.flush() d.u1 = d.factory.create_user(group_ids=[d.g1.id]) d.u2 = d.factory.create_user(group_ids=[d.g2.id]) db.session.add_all( [ models.DataSourceGroup(group=d.g1, data_source=d.ds1), models.DataSourceGroup(group=d.g2, data_source=d.ds2), ] ) d.q1 = d.factory.create_query(data_source=d.ds1) d.q2 = d.factory.create_query(data_source=d.ds2) d.v1 = d.factory.create_visualization(query_rel=d.q1) d.v2 = d.factory.create_visualization(query_rel=d.q2) d.w1 = d.factory.create_widget(visualization=d.v1) d.w2 = d.factory.create_widget(visualization=d.v2) d.w3 = d.factory.create_widget(visualization=d.v2, dashboard=d.w2.dashboard) d.w4 = 
d.factory.create_widget(visualization=d.v2) d.w5 = d.factory.create_widget(visualization=d.v1, dashboard=d.w4.dashboard) d.w1.dashboard.is_draft = False d.w2.dashboard.is_draft = False d.w4.dashboard.is_draft = False class TestDashboardAll(BaseTestCase): def setUp(self): super(TestDashboardAll, self).setUp() _set_up_dashboard_test(self) def test_requires_group_or_user_id(self): d1 = self.factory.create_dashboard() self.assertNotIn( d1, list(models.Dashboard.all(d1.user.org, d1.user.group_ids, None)) ) l2 = list(models.Dashboard.all(d1.user.org, [0], d1.user.id)) self.assertIn(d1, l2) def test_returns_dashboards_based_on_groups(self): self.assertIn( self.w1.dashboard, list(models.Dashboard.all(self.u1.org, self.u1.group_ids, None)), ) self.assertIn( self.w2.dashboard, list(models.Dashboard.all(self.u2.org, self.u2.group_ids, None)), ) self.assertNotIn( self.w1.dashboard, list(models.Dashboard.all(self.u2.org, self.u2.group_ids, None)), ) self.assertNotIn( self.w2.dashboard, list(models.Dashboard.all(self.u1.org, self.u1.group_ids, None)), ) def test_returns_each_dashboard_once(self): dashboards = list(models.Dashboard.all(self.u2.org, self.u2.group_ids, None)) self.assertEqual(len(dashboards), 2) def test_returns_dashboard_you_have_partial_access_to(self): self.assertIn( self.w5.dashboard, models.Dashboard.all(self.u1.org, self.u1.group_ids, None), ) def test_returns_dashboards_created_by_user(self): d1 = self.factory.create_dashboard(user=self.u1) db.session.flush() self.assertIn( d1, list(models.Dashboard.all(self.u1.org, self.u1.group_ids, self.u1.id)) ) self.assertIn(d1, list(models.Dashboard.all(self.u1.org, [0], self.u1.id))) self.assertNotIn( d1, list(models.Dashboard.all(self.u2.org, self.u2.group_ids, self.u2.id)) ) def test_returns_dashboards_with_text_widgets_to_creator(self): w1 = self.factory.create_widget(visualization=None) self.assertEqual(w1.dashboard.user, self.factory.user) self.assertIn( w1.dashboard, list( models.Dashboard.all( self.factory.user.org, self.factory.user.group_ids, self.factory.user.id, ) ), ) self.assertNotIn( w1.dashboard, list(models.Dashboard.all(self.u1.org, self.u1.group_ids, self.u1.id)), ) def test_returns_dashboards_from_current_org_only(self): w1 = self.factory.create_widget() user = self.factory.create_user(org=self.factory.create_org()) self.assertIn( w1.dashboard, list( models.Dashboard.all( self.factory.user.org, self.factory.user.group_ids, None ) ), ) self.assertNotIn( w1.dashboard, list(models.Dashboard.all(user.org, user.group_ids, user.id)) )
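A minimal sketch of the retry rule that test_failure_extends_schedule above implies: after failed executions the next run is pushed back by roughly 2**schedule_failures minutes on top of the normal interval. The helper and its argument names below are hypothetical illustrations, not the actual Query.outdated_queries implementation.

import datetime


def next_run_is_due(previous_run, interval_seconds, schedule_failures, now):
    # Hypothetical helper illustrating the behaviour the test asserts.
    next_iteration = previous_run + datetime.timedelta(seconds=interval_seconds)
    if schedule_failures > 0:
        # exponential backoff: 4 recorded failures add 2**4 = 16 extra minutes
        next_iteration += datetime.timedelta(minutes=2 ** schedule_failures)
    return now >= next_iteration


# With interval="60" and schedule_failures=4, the query is not reported as
# outdated 16 minutes after its previous run, but it is after 17 minutes --
# exactly what the assertions in test_failure_extends_schedule check.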
bsd-2-clause
Mozhuowen/brython
www/src/Lib/test/test_asynchat.py
89
9302
# test asynchat from test import support # If this fails, the test will be skipped. thread = support.import_module('_thread') import asyncore, asynchat, socket, time import unittest import sys try: import threading except ImportError: threading = None HOST = support.HOST SERVER_QUIT = b'QUIT\n' if threading: class echo_server(threading.Thread): # parameter to determine the number of bytes passed back to the # client each send chunk_size = 1 def __init__(self, event): threading.Thread.__init__(self) self.event = event self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.port = support.bind_port(self.sock) # This will be set if the client wants us to wait before echoing data # back. self.start_resend_event = None def run(self): self.sock.listen(1) self.event.set() conn, client = self.sock.accept() self.buffer = b"" # collect data until quit message is seen while SERVER_QUIT not in self.buffer: data = conn.recv(1) if not data: break self.buffer = self.buffer + data # remove the SERVER_QUIT message self.buffer = self.buffer.replace(SERVER_QUIT, b'') if self.start_resend_event: self.start_resend_event.wait() # re-send entire set of collected data try: # this may fail on some tests, such as test_close_when_done, since # the client closes the channel when it's done sending while self.buffer: n = conn.send(self.buffer[:self.chunk_size]) time.sleep(0.001) self.buffer = self.buffer[n:] except: pass conn.close() self.sock.close() class echo_client(asynchat.async_chat): def __init__(self, terminator, server_port): asynchat.async_chat.__init__(self) self.contents = [] self.create_socket(socket.AF_INET, socket.SOCK_STREAM) self.connect((HOST, server_port)) self.set_terminator(terminator) self.buffer = b"" def handle_connect(self): pass if sys.platform == 'darwin': # select.poll returns a select.POLLHUP at the end of the tests # on darwin, so just ignore it def handle_expt(self): pass def collect_incoming_data(self, data): self.buffer += data def found_terminator(self): self.contents.append(self.buffer) self.buffer = b"" def start_echo_server(): event = threading.Event() s = echo_server(event) s.start() event.wait() event.clear() time.sleep(0.01) # Give server time to start accepting. return s, event @unittest.skipUnless(threading, 'Threading required for this test.') class TestAsynchat(unittest.TestCase): usepoll = False def setUp (self): self._threads = support.threading_setup() def tearDown (self): support.threading_cleanup(*self._threads) def line_terminator_check(self, term, server_chunk): event = threading.Event() s = echo_server(event) s.chunk_size = server_chunk s.start() event.wait() event.clear() time.sleep(0.01) # Give server time to start accepting. c = echo_client(term, s.port) c.push(b"hello ") c.push(b"world" + term) c.push(b"I'm not dead yet!" 
+ term) c.push(SERVER_QUIT) asyncore.loop(use_poll=self.usepoll, count=300, timeout=.01) s.join() self.assertEqual(c.contents, [b"hello world", b"I'm not dead yet!"]) # the line terminator tests below check receiving variously-sized # chunks back from the server in order to exercise all branches of # async_chat.handle_read def test_line_terminator1(self): # test one-character terminator for l in (1,2,3): self.line_terminator_check(b'\n', l) def test_line_terminator2(self): # test two-character terminator for l in (1,2,3): self.line_terminator_check(b'\r\n', l) def test_line_terminator3(self): # test three-character terminator for l in (1,2,3): self.line_terminator_check(b'qqq', l) def numeric_terminator_check(self, termlen): # Try reading a fixed number of bytes s, event = start_echo_server() c = echo_client(termlen, s.port) data = b"hello world, I'm not dead yet!\n" c.push(data) c.push(SERVER_QUIT) asyncore.loop(use_poll=self.usepoll, count=300, timeout=.01) s.join() self.assertEqual(c.contents, [data[:termlen]]) def test_numeric_terminator1(self): # check that ints & longs both work (since type is # explicitly checked in async_chat.handle_read) self.numeric_terminator_check(1) def test_numeric_terminator2(self): self.numeric_terminator_check(6) def test_none_terminator(self): # Try reading a fixed number of bytes s, event = start_echo_server() c = echo_client(None, s.port) data = b"hello world, I'm not dead yet!\n" c.push(data) c.push(SERVER_QUIT) asyncore.loop(use_poll=self.usepoll, count=300, timeout=.01) s.join() self.assertEqual(c.contents, []) self.assertEqual(c.buffer, data) def test_simple_producer(self): s, event = start_echo_server() c = echo_client(b'\n', s.port) data = b"hello world\nI'm not dead yet!\n" p = asynchat.simple_producer(data+SERVER_QUIT, buffer_size=8) c.push_with_producer(p) asyncore.loop(use_poll=self.usepoll, count=300, timeout=.01) s.join() self.assertEqual(c.contents, [b"hello world", b"I'm not dead yet!"]) def test_string_producer(self): s, event = start_echo_server() c = echo_client(b'\n', s.port) data = b"hello world\nI'm not dead yet!\n" c.push_with_producer(data+SERVER_QUIT) asyncore.loop(use_poll=self.usepoll, count=300, timeout=.01) s.join() self.assertEqual(c.contents, [b"hello world", b"I'm not dead yet!"]) def test_empty_line(self): # checks that empty lines are handled correctly s, event = start_echo_server() c = echo_client(b'\n', s.port) c.push(b"hello world\n\nI'm not dead yet!\n") c.push(SERVER_QUIT) asyncore.loop(use_poll=self.usepoll, count=300, timeout=.01) s.join() self.assertEqual(c.contents, [b"hello world", b"", b"I'm not dead yet!"]) def test_close_when_done(self): s, event = start_echo_server() s.start_resend_event = threading.Event() c = echo_client(b'\n', s.port) c.push(b"hello world\nI'm not dead yet!\n") c.push(SERVER_QUIT) c.close_when_done() asyncore.loop(use_poll=self.usepoll, count=300, timeout=.01) # Only allow the server to start echoing data back to the client after # the client has closed its connection. This prevents a race condition # where the server echoes all of its data before we can check that it # got any down below. 
s.start_resend_event.set() s.join() self.assertEqual(c.contents, []) # the server might have been able to send a byte or two back, but this # at least checks that it received something and didn't just fail # (which could still result in the client not having received anything) self.assertGreater(len(s.buffer), 0) class TestAsynchat_WithPoll(TestAsynchat): usepoll = True class TestHelperFunctions(unittest.TestCase): def test_find_prefix_at_end(self): self.assertEqual(asynchat.find_prefix_at_end("qwerty\r", "\r\n"), 1) self.assertEqual(asynchat.find_prefix_at_end("qwertydkjf", "\r\n"), 0) class TestFifo(unittest.TestCase): def test_basic(self): f = asynchat.fifo() f.push(7) f.push(b'a') self.assertEqual(len(f), 2) self.assertEqual(f.first(), 7) self.assertEqual(f.pop(), (1, 7)) self.assertEqual(len(f), 1) self.assertEqual(f.first(), b'a') self.assertEqual(f.is_empty(), False) self.assertEqual(f.pop(), (1, b'a')) self.assertEqual(len(f), 0) self.assertEqual(f.is_empty(), True) self.assertEqual(f.pop(), (0, None)) def test_given_list(self): f = asynchat.fifo([b'x', 17, 3]) self.assertEqual(len(f), 3) self.assertEqual(f.pop(), (1, b'x')) self.assertEqual(f.pop(), (1, 17)) self.assertEqual(f.pop(), (1, 3)) self.assertEqual(f.pop(), (0, None)) def test_main(verbose=None): support.run_unittest(TestAsynchat, TestAsynchat_WithPoll, TestHelperFunctions, TestFifo) if __name__ == "__main__": test_main(verbose=True)
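The three terminator modes those tests exercise can be summarised in one small subclass; the class below is a hypothetical illustration and is not part of the test suite above.

import asynchat


class TerminatorDemo(asynchat.async_chat):
    # Hypothetical sketch of the hook protocol that echo_client relies on.
    def __init__(self, sock=None):
        asynchat.async_chat.__init__(self, sock)
        self.parts = []
        self.buffer = b""
        # set_terminator(b'\n') -> found_terminator() fires once per line,
        #                          with the b'\n' already stripped
        # set_terminator(6)     -> found_terminator() fires after exactly 6 bytes
        # set_terminator(None)  -> found_terminator() never fires; data only
        #                          accumulates in collect_incoming_data()
        self.set_terminator(b"\n")

    def collect_incoming_data(self, data):
        self.buffer += data

    def found_terminator(self):
        self.parts.append(self.buffer)
        self.buffer = b""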
bsd-3-clause
tsdmgz/ansible
lib/ansible/modules/system/lvol.py
13
17067
#!/usr/bin/python # -*- coding: utf-8 -*- # Copyright: (c) 2013, Jeroen Hoekx <[email protected]>, Alexander Bulimov <[email protected]> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- author: - Jeroen Hoekx (@jhoekx) - Alexander Bulimov (@abulimov) module: lvol short_description: Configure LVM logical volumes description: - This module creates, removes or resizes logical volumes. version_added: "1.1" options: vg: description: - The volume group this logical volume is part of. required: true lv: description: - The name of the logical volume. required: true size: description: - The size of the logical volume, according to lvcreate(8) --size, by default in megabytes or optionally with one of [bBsSkKmMgGtTpPeE] units; or according to lvcreate(8) --extents as a percentage of [VG|PVS|FREE]; Float values must begin with a digit. Resizing using percentage values was not supported prior to 2.1. state: description: - Control if the logical volume exists. If C(present) and the volume does not already exist then the C(size) option is required. choices: [ absent, present ] default: present active: description: - Whether the volume is activate and visible to the host. type: bool default: 'yes' version_added: "2.2" force: description: - Shrink or remove operations of volumes requires this switch. Ensures that that filesystems get never corrupted/destroyed by mistake. type: bool default: 'no' version_added: "1.5" opts: description: - Free-form options to be passed to the lvcreate command. version_added: "2.0" snapshot: description: - The name of the snapshot volume version_added: "2.1" pvs: description: - Comma separated list of physical volumes (e.g. /dev/sda,/dev/sdb). version_added: "2.2" shrink: description: - Shrink if current size is higher than size requested. type: bool default: 'yes' version_added: "2.2" resizefs: description: - Resize the underlying filesystem together with the logical volume. type: bool default: 'yes' version_added: "2.5" ''' EXAMPLES = ''' - name: Create a logical volume of 512m lvol: vg: firefly lv: test size: 512 - name: Create a logical volume of 512m with disks /dev/sda and /dev/sdb lvol: vg: firefly lv: test size: 512 pvs: /dev/sda,/dev/sdb - name: Create cache pool logical volume lvol: vg: firefly lv: lvcache size: 512m opts: --type cache-pool - name: Create a logical volume of 512g. lvol: vg: firefly lv: test size: 512g - name: Create a logical volume the size of all remaining space in the volume group lvol: vg: firefly lv: test size: 100%FREE - name: Create a logical volume with special options lvol: vg: firefly lv: test size: 512g opts: -r 16 - name: Extend the logical volume to 1024m. 
lvol: vg: firefly lv: test size: 1024 - name: Extend the logical volume to consume all remaining space in the volume group lvol: vg: firefly lv: test size: +100%FREE - name: Extend the logical volume to take all remaining space of the PVs lvol: vg: firefly lv: test size: 100%PVS resizefs: true - name: Resize the logical volume to % of VG lvol: vg: firefly lv: test size: 80%VG force: yes - name: Reduce the logical volume to 512m lvol: vg: firefly lv: test size: 512 force: yes - name: Set the logical volume to 512m and do not try to shrink if size is lower than current one lvol: vg: firefly lv: test size: 512 shrink: no - name: Remove the logical volume. lvol: vg: firefly lv: test state: absent force: yes - name: Create a snapshot volume of the test logical volume. lvol: vg: firefly lv: test snapshot: snap1 size: 100m - name: Deactivate a logical volume lvol: vg: firefly lv: test active: false - name: Create a deactivated logical volume lvol: vg: firefly lv: test size: 512g active: false ''' import re from ansible.module_utils.basic import AnsibleModule decimal_point = re.compile(r"(\d+)") def mkversion(major, minor, patch): return (1000 * 1000 * int(major)) + (1000 * int(minor)) + int(patch) def parse_lvs(data): lvs = [] for line in data.splitlines(): parts = line.strip().split(';') lvs.append({ 'name': parts[0].replace('[', '').replace(']', ''), 'size': int(decimal_point.match(parts[1]).group(1)), 'active': (parts[2][4] == 'a') }) return lvs def parse_vgs(data): vgs = [] for line in data.splitlines(): parts = line.strip().split(';') vgs.append({ 'name': parts[0], 'size': int(decimal_point.match(parts[1]).group(1)), 'free': int(decimal_point.match(parts[2]).group(1)), 'ext_size': int(decimal_point.match(parts[3]).group(1)) }) return vgs def get_lvm_version(module): ver_cmd = module.get_bin_path("lvm", required=True) rc, out, err = module.run_command("%s version" % (ver_cmd)) if rc != 0: return None m = re.search(r"LVM version:\s+(\d+)\.(\d+)\.(\d+).*(\d{4}-\d{2}-\d{2})", out) if not m: return None return mkversion(m.group(1), m.group(2), m.group(3)) def main(): module = AnsibleModule( argument_spec=dict( vg=dict(type='str', required=True), lv=dict(type='str', required=True), size=dict(type='str'), opts=dict(type='str'), state=dict(type='str', default='present', choices=['absent', 'present']), force=dict(type='bool', default=False), shrink=dict(type='bool', default=True), active=dict(type='bool', default=True), snapshot=dict(type='str'), pvs=dict(type='str'), resizefs=dict(type='bool', default=False), ), supports_check_mode=True, ) # Determine if the "--yes" option should be used version_found = get_lvm_version(module) if version_found is None: module.fail_json(msg="Failed to get LVM version number") version_yesopt = mkversion(2, 2, 99) # First LVM with the "--yes" option if version_found >= version_yesopt: yesopt = "--yes" else: yesopt = "" vg = module.params['vg'] lv = module.params['lv'] size = module.params['size'] opts = module.params['opts'] state = module.params['state'] force = module.boolean(module.params['force']) shrink = module.boolean(module.params['shrink']) active = module.boolean(module.params['active']) resizefs = module.boolean(module.params['resizefs']) size_opt = 'L' size_unit = 'm' snapshot = module.params['snapshot'] pvs = module.params['pvs'] if pvs is None: pvs = "" else: pvs = pvs.replace(",", " ") if opts is None: opts = "" # Add --test option when running in check-mode if module.check_mode: test_opt = ' --test' else: test_opt = '' if size: # LVCREATE(8) -l 
--extents option with percentage if '%' in size: size_parts = size.split('%', 1) size_percent = int(size_parts[0]) if size_percent > 100: module.fail_json(msg="Size percentage cannot be larger than 100%") size_whole = size_parts[1] if size_whole == 'ORIGIN': module.fail_json(msg="Snapshot Volumes are not supported") elif size_whole not in ['VG', 'PVS', 'FREE']: module.fail_json(msg="Specify extents as a percentage of VG|PVS|FREE") size_opt = 'l' size_unit = '' if '%' not in size: # LVCREATE(8) -L --size option unit if size[-1].lower() in 'bskmgtpe': size_unit = size[-1].lower() size = size[0:-1] try: float(size) if not size[0].isdigit(): raise ValueError() except ValueError: module.fail_json(msg="Bad size specification of '%s'" % size) # when no unit, megabytes by default if size_opt == 'l': unit = 'm' else: unit = size_unit # Get information on volume group requested vgs_cmd = module.get_bin_path("vgs", required=True) rc, current_vgs, err = module.run_command( "%s --noheadings -o vg_name,size,free,vg_extent_size --units %s --separator ';' %s" % (vgs_cmd, unit, vg)) if rc != 0: if state == 'absent': module.exit_json(changed=False, stdout="Volume group %s does not exist." % vg) else: module.fail_json(msg="Volume group %s does not exist." % vg, rc=rc, err=err) vgs = parse_vgs(current_vgs) this_vg = vgs[0] # Get information on logical volume requested lvs_cmd = module.get_bin_path("lvs", required=True) rc, current_lvs, err = module.run_command( "%s -a --noheadings --nosuffix -o lv_name,size,lv_attr --units %s --separator ';' %s" % (lvs_cmd, unit, vg)) if rc != 0: if state == 'absent': module.exit_json(changed=False, stdout="Volume group %s does not exist." % vg) else: module.fail_json(msg="Volume group %s does not exist." % vg, rc=rc, err=err) changed = False lvs = parse_lvs(current_lvs) if snapshot is None: check_lv = lv else: check_lv = snapshot for test_lv in lvs: if test_lv['name'] in (check_lv, check_lv.rsplit('/', 1)[-1]): this_lv = test_lv break else: this_lv = None if state == 'present' and not size: if this_lv is None: module.fail_json(msg="No size given.") msg = '' if this_lv is None: if state == 'present': # create LV lvcreate_cmd = module.get_bin_path("lvcreate", required=True) if snapshot is not None: cmd = "%s %s %s -%s %s%s -s -n %s %s %s/%s" % (lvcreate_cmd, test_opt, yesopt, size_opt, size, size_unit, snapshot, opts, vg, lv) else: cmd = "%s %s %s -n %s -%s %s%s %s %s %s" % (lvcreate_cmd, test_opt, yesopt, lv, size_opt, size, size_unit, opts, vg, pvs) rc, _, err = module.run_command(cmd) if rc == 0: changed = True else: module.fail_json(msg="Creating logical volume '%s' failed" % lv, rc=rc, err=err) else: if state == 'absent': # remove LV if not force: module.fail_json(msg="Sorry, no removal of logical volume %s without force=yes." 
% (this_lv['name'])) lvremove_cmd = module.get_bin_path("lvremove", required=True) rc, _, err = module.run_command("%s %s --force %s/%s" % (lvremove_cmd, test_opt, vg, this_lv['name'])) if rc == 0: module.exit_json(changed=True) else: module.fail_json(msg="Failed to remove logical volume %s" % (lv), rc=rc, err=err) elif not size: pass elif size_opt == 'l': # Resize LV based on % value tool = None size_free = this_vg['free'] if size_whole == 'VG' or size_whole == 'PVS': size_requested = size_percent * this_vg['size'] / 100 else: # size_whole == 'FREE': size_requested = size_percent * this_vg['free'] / 100 if '+' in size: size_requested += this_lv['size'] if this_lv['size'] < size_requested: if (size_free > 0) and (('+' not in size) or (size_free >= (size_requested - this_lv['size']))): tool = module.get_bin_path("lvextend", required=True) else: module.fail_json( msg="Logical Volume %s could not be extended. Not enough free space left (%s%s required / %s%s available)" % (this_lv['name'], (size_requested - this_lv['size']), unit, size_free, unit) ) elif shrink and this_lv['size'] > size_requested + this_vg['ext_size']: # more than an extent too large if size_requested == 0: module.fail_json(msg="Sorry, no shrinking of %s to 0 permitted." % (this_lv['name'])) elif not force: module.fail_json(msg="Sorry, no shrinking of %s without force=yes" % (this_lv['name'])) else: tool = module.get_bin_path("lvreduce", required=True) tool = '%s %s' % (tool, '--force') if tool: if resizefs: tool = '%s %s' % (tool, '--resizefs') cmd = "%s %s -%s %s%s %s/%s %s" % (tool, test_opt, size_opt, size, size_unit, vg, this_lv['name'], pvs) rc, out, err = module.run_command(cmd) if "Reached maximum COW size" in out: module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err, out=out) elif rc == 0: changed = True msg = "Volume %s resized to %s%s" % (this_lv['name'], size_requested, unit) elif "matches existing size" in err: module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size']) elif "not larger than existing size" in err: module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size'], msg="Original size is larger than requested size", err=err) else: module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err) else: # resize LV based on absolute values tool = None if int(size) > this_lv['size']: tool = module.get_bin_path("lvextend", required=True) elif shrink and int(size) < this_lv['size']: if int(size) == 0: module.fail_json(msg="Sorry, no shrinking of %s to 0 permitted." % (this_lv['name'])) if not force: module.fail_json(msg="Sorry, no shrinking of %s without force=yes." 
% (this_lv['name'])) else: tool = module.get_bin_path("lvreduce", required=True) tool = '%s %s' % (tool, '--force') if tool: if resizefs: tool = '%s %s' % (tool, '--resizefs') cmd = "%s %s -%s %s%s %s/%s %s" % (tool, test_opt, size_opt, size, size_unit, vg, this_lv['name'], pvs) rc, out, err = module.run_command(cmd) if "Reached maximum COW size" in out: module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err, out=out) elif rc == 0: changed = True elif "matches existing size" in err: module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size']) elif "not larger than existing size" in err: module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size'], msg="Original size is larger than requested size", err=err) else: module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err) if this_lv is not None: if active: lvchange_cmd = module.get_bin_path("lvchange", required=True) rc, _, err = module.run_command("%s -ay %s/%s" % (lvchange_cmd, vg, this_lv['name'])) if rc == 0: module.exit_json(changed=((not this_lv['active']) or changed), vg=vg, lv=this_lv['name'], size=this_lv['size']) else: module.fail_json(msg="Failed to activate logical volume %s" % (lv), rc=rc, err=err) else: lvchange_cmd = module.get_bin_path("lvchange", required=True) rc, _, err = module.run_command("%s -an %s/%s" % (lvchange_cmd, vg, this_lv['name'])) if rc == 0: module.exit_json(changed=(this_lv['active'] or changed), vg=vg, lv=this_lv['name'], size=this_lv['size']) else: module.fail_json(msg="Failed to deactivate logical volume %s" % (lv), rc=rc, err=err) module.exit_json(changed=changed, msg=msg) if __name__ == '__main__': main()
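The parse_vgs()/parse_lvs() helpers above assume the ';'-separated, headerless report format requested by the vgs/lvs invocations; the sample strings below use made-up values to sketch that format and the dictionaries it parses into.

# Made-up sample output in the format requested by the vgs/lvs commands above
# (--noheadings, --separator ';'); sizes keep only their integer part and the
# fifth character of lv_attr ('a') marks an active volume.
sample_vgs_output = "  firefly;10240.00m;5120.00m;4.00m\n"
sample_lvs_output = "  test;512.00;-wi-a-----\n"

# parse_vgs(sample_vgs_output) would yield:
#   [{'name': 'firefly', 'size': 10240, 'free': 5120, 'ext_size': 4}]
# parse_lvs(sample_lvs_output) would yield:
#   [{'name': 'test', 'size': 512, 'active': True}]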
gpl-3.0
hendawy/drchrono-patient-education
pshare/sharebackend/models.py
1
3848
# std lib imports import datetime import md5 # django imports from django.db import models from django.utils.translation import ugettext_lazy as _ from django.contrib.auth.models import User from django.db.models.signals import post_save from django.conf import settings # third-party app imports # app imports from .tasks import email_group, update_patients def create_user_profile(sender, instance, created, **kwargs): if created: UserProfile.objects.create(user=instance) post_save.connect(create_user_profile, sender=User) def upload_file(instance, file_name): return '{0}{1}_{2}'.format( settings.UPLOAD_DIR, md5.new(str(datetime.datetime.now())).hexdigest(), file_name) class UserProfile(models.Model): user = models.OneToOneField(User) access_token = models.CharField('access_token', max_length=200, null=True) refresh_token = models.CharField('refresh_token', max_length=200, null=True) class BaseModel(models.Model): """ Base model for the created and modified date time """ created_date = models.DateTimeField(auto_now_add=True) modified_date = models.DateTimeField(auto_now=True) class Meta: abstract = True class Tag(BaseModel): """ Tags to group patients """ user = models.ForeignKey(User) text = models.CharField('tag', max_length=150, null=True) def tag_patients(self, patient_ids, callback=None, args=None): PatientTag.objects.bulk_create([PatientTag( tag=self, patient_id=patient_id) for patient_id in patient_ids]) if callback is not None and args is not None: callback(*args) def update_patients(self, user_id, patient_ids): if len(patient_ids) > 0: update_patients.apply_async(args=[self.id, user_id, patient_ids]) class PatientTag(BaseModel): """ Relating patients to certain tags """ tag = models.ForeignKey('Tag') patient_id = models.IntegerField(null=True) class Meta: verbose_name = _('Patient Tag') verbose_name_plural = _('Patient Tags') @classmethod def queryset_api_intersection(cls, queryset, api_patient_dict): patients = [] if queryset.count() < 1: return patients patiend_id_set = set(queryset.values_list('patient_id', flat=True)) for patient in api_patient_dict: if patient['id'] in patiend_id_set: patients.append(patient) return patients @classmethod def queryset_api_exclude(cls, queryset, api_patient_dict): patients = [] if queryset.count() < 1: return api_patient_dict patiend_id_set = set(queryset.values_list('patient_id', flat=True)) for patient in api_patient_dict: if patient['id'] not in patiend_id_set: patients.append(patient) return patients class File(BaseModel): """ Uploaded files to share with patient groups """ user = models.ForeignKey(User) file_name = models.CharField('file_name', max_length=150, null=True) shared_file = models.FileField( upload_to=upload_file, blank=False, null=False) def tag_file(self, tag_id_list, user): tags = Tag.objects.filter(id__in=tag_id_list, user=user) TagFileShare.objects.bulk_create([TagFileShare( file_share=self, tag=tag) for tag in tags]) for tag_id in tag_id_list: email_group.apply_async(args=[self.id, tag_id, user.username]) class TagFileShare(BaseModel): """ Relating files to tags """ file_share = models.ForeignKey('File') tag = models.ForeignKey('Tag') class PatientFileShare(BaseModel): """ Relating files to patients """ file_share = models.ForeignKey('File') patient_id = models.IntegerField(null=True)
mit
bitifirefly/edx-platform
common/djangoapps/track/views/__init__.py
74
6212
import datetime import json import pytz from django.contrib.auth.decorators import login_required from django.http import HttpResponse from django.shortcuts import redirect from django.views.decorators.csrf import ensure_csrf_cookie from edxmako.shortcuts import render_to_response from track import tracker from track import contexts from track import shim from track.models import TrackingLog from eventtracking import tracker as eventtracker def log_event(event): """Capture a event by sending it to the register trackers""" tracker.send(event) def _get_request_header(request, header_name, default=''): """Helper method to get header values from a request's META dict, if present.""" if request is not None and hasattr(request, 'META') and header_name in request.META: return request.META[header_name] else: return default def _get_request_value(request, value_name, default=''): """Helper method to get header values from a request's REQUEST dict, if present.""" if request is not None and hasattr(request, 'REQUEST') and value_name in request.REQUEST: return request.REQUEST[value_name] else: return default def user_track(request): """ Log when POST call to "event" URL is made by a user. Uses request.REQUEST to allow for GET calls. GET or POST call should provide "event_type", "event", and "page" arguments. """ try: username = request.user.username except: username = "anonymous" name = _get_request_value(request, 'event_type') data = _get_request_value(request, 'event', {}) page = _get_request_value(request, 'page') if isinstance(data, basestring) and len(data) > 0: try: data = json.loads(data) except ValueError: pass context_override = contexts.course_context_from_url(page) context_override['username'] = username context_override['event_source'] = 'browser' context_override['page'] = page with eventtracker.get_tracker().context('edx.course.browser', context_override): eventtracker.emit(name=name, data=data) return HttpResponse('success') def server_track(request, event_type, event, page=None): """ Log events related to server requests. Handle the situation where the request may be NULL, as may happen with management commands. """ if event_type.startswith("/event_logs") and request.user.is_staff: return # don't log try: username = request.user.username except: username = "anonymous" # define output: event = { "username": username, "ip": _get_request_header(request, 'REMOTE_ADDR'), "referer": _get_request_header(request, 'HTTP_REFERER'), "accept_language": _get_request_header(request, 'HTTP_ACCEPT_LANGUAGE'), "event_source": "server", "event_type": event_type, "event": event, "agent": _get_request_header(request, 'HTTP_USER_AGENT'), "page": page, "time": datetime.datetime.utcnow(), "host": _get_request_header(request, 'SERVER_NAME'), "context": eventtracker.get_tracker().resolve_context(), } # Some duplicated fields are passed into event-tracking via the context by track.middleware. # Remove them from the event here since they are captured elsewhere. shim.remove_shim_context(event) log_event(event) def task_track(request_info, task_info, event_type, event, page=None): """ Logs tracking information for events occuring within celery tasks. The `event_type` is a string naming the particular event being logged, while `event` is a dict containing whatever additional contextual information is desired. The `request_info` is a dict containing information about the original task request. Relevant keys are `username`, `ip`, `agent`, and `host`. 
While the dict is required, the values in it are not, so that {} can be passed in. In addition, a `task_info` dict provides more information about the current task, to be stored with the `event` dict. This may also be an empty dict. The `page` parameter is optional, and allows the name of the page to be provided. """ # supplement event information with additional information # about the task in which it is running. full_event = dict(event, **task_info) # All fields must be specified, in case the tracking information is # also saved to the TrackingLog model. Get values from the task-level # information, or just add placeholder values. with eventtracker.get_tracker().context('edx.course.task', contexts.course_context_from_url(page)): event = { "username": request_info.get('username', 'unknown'), "ip": request_info.get('ip', 'unknown'), "event_source": "task", "event_type": event_type, "event": full_event, "agent": request_info.get('agent', 'unknown'), "page": page, "time": datetime.datetime.utcnow(), "host": request_info.get('host', 'unknown'), "context": eventtracker.get_tracker().resolve_context(), } log_event(event) @login_required @ensure_csrf_cookie def view_tracking_log(request, args=''): """View to output contents of TrackingLog model. For staff use only.""" if not request.user.is_staff: return redirect('/') nlen = 100 username = '' if args: for arg in args.split('/'): if arg.isdigit(): nlen = int(arg) if arg.startswith('username='): username = arg[9:] record_instances = TrackingLog.objects.all().order_by('-time') if username: record_instances = record_instances.filter(username=username) record_instances = record_instances[0:nlen] # fix dtstamp fmt = '%a %d-%b-%y %H:%M:%S' # "%Y-%m-%d %H:%M:%S %Z%z" for rinst in record_instances: rinst.dtstr = rinst.time.replace(tzinfo=pytz.utc).astimezone(pytz.timezone('US/Eastern')).strftime(fmt) return render_to_response('tracking_log.html', {'records': record_instances})
agpl-3.0
webdev1001/ansible
v2/ansible/utils/vault.py
8
2127
# (c) 2012-2014, Michael DeHaan <[email protected]> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type import os import subprocess from ansible import constants as C from ansible.errors import AnsibleError from ansible.utils.path import is_executable def read_vault_file(vault_password_file): """ Read a vault password from a file or if executable, execute the script and retrieve password from STDOUT """ this_path = os.path.realpath(os.path.expanduser(vault_password_file)) if not os.path.exists(this_path): raise AnsibleError("The vault password file %s was not found" % this_path) if is_executable(this_path): try: # STDERR not captured to make it easier for users to prompt for input in their scripts p = subprocess.Popen(this_path, stdout=subprocess.PIPE) except OSError, e: raise AnsibleError("Problem running vault password script %s (%s). If this is not a script, remove the executable bit from the file." % (' '.join(this_path), e)) stdout, stderr = p.communicate() vault_pass = stdout.strip('\r\n') else: try: f = open(this_path, "rb") vault_pass=f.read().strip() f.close() except (OSError, IOError), e: raise AnsibleError("Could not read vault password file %s: %s" % (this_path, e)) return vault_pass
gpl-3.0
chinmaygarde/CoreLib
Test/GoogleTest/test/gtest_test_utils.py
674
10826
#!/usr/bin/env python # # Copyright 2006, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Unit test utilities for Google C++ Testing Framework.""" __author__ = '[email protected] (Zhanyong Wan)' import atexit import os import shutil import sys import tempfile import unittest _test_module = unittest # Suppresses the 'Import not at the top of the file' lint complaint. # pylint: disable-msg=C6204 try: import subprocess _SUBPROCESS_MODULE_AVAILABLE = True except: import popen2 _SUBPROCESS_MODULE_AVAILABLE = False # pylint: enable-msg=C6204 GTEST_OUTPUT_VAR_NAME = 'GTEST_OUTPUT' IS_WINDOWS = os.name == 'nt' IS_CYGWIN = os.name == 'posix' and 'CYGWIN' in os.uname()[0] # The environment variable for specifying the path to the premature-exit file. PREMATURE_EXIT_FILE_ENV_VAR = 'TEST_PREMATURE_EXIT_FILE' environ = os.environ.copy() def SetEnvVar(env_var, value): """Sets/unsets an environment variable to a given value.""" if value is not None: environ[env_var] = value elif env_var in environ: del environ[env_var] # Here we expose a class from a particular module, depending on the # environment. The comment suppresses the 'Invalid variable name' lint # complaint. TestCase = _test_module.TestCase # pylint: disable-msg=C6409 # Initially maps a flag to its default value. After # _ParseAndStripGTestFlags() is called, maps a flag to its actual value. _flag_map = {'source_dir': os.path.dirname(sys.argv[0]), 'build_dir': os.path.dirname(sys.argv[0])} _gtest_flags_are_parsed = False def _ParseAndStripGTestFlags(argv): """Parses and strips Google Test flags from argv. This is idempotent.""" # Suppresses the lint complaint about a global variable since we need it # here to maintain module-wide state. global _gtest_flags_are_parsed # pylint: disable-msg=W0603 if _gtest_flags_are_parsed: return _gtest_flags_are_parsed = True for flag in _flag_map: # The environment variable overrides the default value. if flag.upper() in os.environ: _flag_map[flag] = os.environ[flag.upper()] # The command line flag overrides the environment variable. i = 1 # Skips the program name. 
while i < len(argv): prefix = '--' + flag + '=' if argv[i].startswith(prefix): _flag_map[flag] = argv[i][len(prefix):] del argv[i] break else: # We don't increment i in case we just found a --gtest_* flag # and removed it from argv. i += 1 def GetFlag(flag): """Returns the value of the given flag.""" # In case GetFlag() is called before Main(), we always call # _ParseAndStripGTestFlags() here to make sure the --gtest_* flags # are parsed. _ParseAndStripGTestFlags(sys.argv) return _flag_map[flag] def GetSourceDir(): """Returns the absolute path of the directory where the .py files are.""" return os.path.abspath(GetFlag('source_dir')) def GetBuildDir(): """Returns the absolute path of the directory where the test binaries are.""" return os.path.abspath(GetFlag('build_dir')) _temp_dir = None def _RemoveTempDir(): if _temp_dir: shutil.rmtree(_temp_dir, ignore_errors=True) atexit.register(_RemoveTempDir) def GetTempDir(): """Returns a directory for temporary files.""" global _temp_dir if not _temp_dir: _temp_dir = tempfile.mkdtemp() return _temp_dir def GetTestExecutablePath(executable_name, build_dir=None): """Returns the absolute path of the test binary given its name. The function will print a message and abort the program if the resulting file doesn't exist. Args: executable_name: name of the test binary that the test script runs. build_dir: directory where to look for executables, by default the result of GetBuildDir(). Returns: The absolute path of the test binary. """ path = os.path.abspath(os.path.join(build_dir or GetBuildDir(), executable_name)) if (IS_WINDOWS or IS_CYGWIN) and not path.endswith('.exe'): path += '.exe' if not os.path.exists(path): message = ( 'Unable to find the test binary "%s". Please make sure to provide\n' 'a path to the binary via the --build_dir flag or the BUILD_DIR\n' 'environment variable.' % path) print >> sys.stderr, message sys.exit(1) return path def GetExitStatus(exit_code): """Returns the argument to exit(), or -1 if exit() wasn't called. Args: exit_code: the result value of os.system(command). """ if os.name == 'nt': # On Windows, os.WEXITSTATUS() doesn't work and os.system() returns # the argument to exit() directly. return exit_code else: # On Unix, os.WEXITSTATUS() must be used to extract the exit status # from the result of os.system(). if os.WIFEXITED(exit_code): return os.WEXITSTATUS(exit_code) else: return -1 class Subprocess: def __init__(self, command, working_dir=None, capture_stderr=True, env=None): """Changes into a specified directory, if provided, and executes a command. Restores the old directory afterwards. Args: command: The command to run, in the form of sys.argv. working_dir: The directory to change into. capture_stderr: Determines whether to capture stderr in the output member or to discard it. env: Dictionary with environment to pass to the subprocess. Returns: An object that represents outcome of the executed process. It has the following attributes: terminated_by_signal True iff the child process has been terminated by a signal. signal Sygnal that terminated the child process. exited True iff the child process exited normally. exit_code The code with which the child process exited. output Child process's stdout and stderr output combined in a string. """ # The subprocess module is the preferrable way of running programs # since it is available and behaves consistently on all platforms, # including Windows. But it is only available starting in python 2.4. 
# In earlier python versions, we revert to the popen2 module, which is # available in python 2.0 and later but doesn't provide required # functionality (Popen4) under Windows. This allows us to support Mac # OS X 10.4 Tiger, which has python 2.3 installed. if _SUBPROCESS_MODULE_AVAILABLE: if capture_stderr: stderr = subprocess.STDOUT else: stderr = subprocess.PIPE p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=stderr, cwd=working_dir, universal_newlines=True, env=env) # communicate returns a tuple with the file obect for the child's # output. self.output = p.communicate()[0] self._return_code = p.returncode else: old_dir = os.getcwd() def _ReplaceEnvDict(dest, src): # Changes made by os.environ.clear are not inheritable by child # processes until Python 2.6. To produce inheritable changes we have # to delete environment items with the del statement. for key in dest.keys(): del dest[key] dest.update(src) # When 'env' is not None, backup the environment variables and replace # them with the passed 'env'. When 'env' is None, we simply use the # current 'os.environ' for compatibility with the subprocess.Popen # semantics used above. if env is not None: old_environ = os.environ.copy() _ReplaceEnvDict(os.environ, env) try: if working_dir is not None: os.chdir(working_dir) if capture_stderr: p = popen2.Popen4(command) else: p = popen2.Popen3(command) p.tochild.close() self.output = p.fromchild.read() ret_code = p.wait() finally: os.chdir(old_dir) # Restore the old environment variables # if they were replaced. if env is not None: _ReplaceEnvDict(os.environ, old_environ) # Converts ret_code to match the semantics of # subprocess.Popen.returncode. if os.WIFSIGNALED(ret_code): self._return_code = -os.WTERMSIG(ret_code) else: # os.WIFEXITED(ret_code) should return True here. self._return_code = os.WEXITSTATUS(ret_code) if self._return_code < 0: self.terminated_by_signal = True self.exited = False self.signal = -self._return_code else: self.terminated_by_signal = False self.exited = True self.exit_code = self._return_code def Main(): """Runs the unit test.""" # We must call _ParseAndStripGTestFlags() before calling # unittest.main(). Otherwise the latter will be confused by the # --gtest_* flags. _ParseAndStripGTestFlags(sys.argv) # The tested binaries should not be writing XML output files unless the # script explicitly instructs them to. # TODO([email protected]): Move this into Subprocess when we implement # passing environment into it as a parameter. if GTEST_OUTPUT_VAR_NAME in os.environ: del os.environ[GTEST_OUTPUT_VAR_NAME] _test_module.main()
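A small hypothetical usage of the Subprocess helper above; the command and the module import are assumptions made for this sketch, and the attributes read here are the ones documented in its constructor docstring.

import gtest_test_utils as utils

# Run a trivial command and inspect the outcome object described above.
p = utils.Subprocess(['/bin/true'])          # command given argv-style
if p.terminated_by_signal:
    print('terminated by signal %d' % p.signal)
elif p.exited:
    print('exit code %d' % p.exit_code)
print('combined stdout/stderr:\n%s' % p.output)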
mit
surgebiswas/poker
PokerBots_2017/Johnny/scipy/optimize/_lsq/dogbox.py
40
11699
""" dogleg algorithm with rectangular trust regions for least-squares minimization. The description of the algorithm can be found in [Voglis]_. The algorithm does trust-region iterations, but the shape of trust regions is rectangular as opposed to conventional elliptical. The intersection of a trust region and an initial feasible region is again some rectangle. Thus on each iteration a bound-constrained quadratic optimization problem is solved. A quadratic problem is solved by well-known dogleg approach, where the function is minimized along piecewise-linear "dogleg" path [NumOpt]_, Chapter 4. If Jacobian is not rank-deficient then the function is decreasing along this path, and optimization amounts to simply following along this path as long as a point stays within the bounds. A constrained Cauchy step (along the anti-gradient) is considered for safety in rank deficient cases, in this situations the convergence might be slow. If during iterations some variable hit the initial bound and the component of anti-gradient points outside the feasible region, then a next dogleg step won't make any progress. At this state such variables satisfy first-order optimality conditions and they are excluded before computing a next dogleg step. Gauss-Newton step can be computed exactly by `numpy.linalg.lstsq` (for dense Jacobian matrices) or by iterative procedure `scipy.sparse.linalg.lsmr` (for dense and sparse matrices, or Jacobian being LinearOperator). The second option allows to solve very large problems (up to couple of millions of residuals on a regular PC), provided the Jacobian matrix is sufficiently sparse. But note that dogbox is not very good for solving problems with large number of constraints, because of variables exclusion-inclusion on each iteration (a required number of function evaluations might be high or accuracy of a solution will be poor), thus its large-scale usage is probably limited to unconstrained problems. References ---------- .. [Voglis] C. Voglis and I. E. Lagaris, "A Rectangular Trust Region Dogleg Approach for Unconstrained and Bound Constrained Nonlinear Optimization", WSEAS International Conference on Applied Mathematics, Corfu, Greece, 2004. .. [NumOpt] J. Nocedal and S. J. Wright, "Numerical optimization, 2nd edition". """ from __future__ import division, print_function, absolute_import import numpy as np from numpy.linalg import lstsq, norm from scipy.sparse.linalg import LinearOperator, aslinearoperator, lsmr from scipy.optimize import OptimizeResult from scipy._lib.six import string_types from .common import ( step_size_to_bound, in_bounds, update_tr_radius, evaluate_quadratic, build_quadratic_1d, minimize_quadratic_1d, compute_grad, compute_jac_scale, check_termination, scale_for_robust_loss_function, print_header_nonlinear, print_iteration_nonlinear) def lsmr_operator(Jop, d, active_set): """Compute LinearOperator to use in LSMR by dogbox algorithm. `active_set` mask is used to excluded active variables from computations of matrix-vector products. """ m, n = Jop.shape def matvec(x): x_free = x.ravel().copy() x_free[active_set] = 0 return Jop.matvec(x * d) def rmatvec(x): r = d * Jop.rmatvec(x) r[active_set] = 0 return r return LinearOperator((m, n), matvec=matvec, rmatvec=rmatvec, dtype=float) def find_intersection(x, tr_bounds, lb, ub): """Find intersection of trust-region bounds and initial bounds. Returns ------- lb_total, ub_total : ndarray with shape of x Lower and upper bounds of the intersection region. 
orig_l, orig_u : ndarray of bool with shape of x True means that an original bound is taken as a corresponding bound in the intersection region. tr_l, tr_u : ndarray of bool with shape of x True means that a trust-region bound is taken as a corresponding bound in the intersection region. """ lb_centered = lb - x ub_centered = ub - x lb_total = np.maximum(lb_centered, -tr_bounds) ub_total = np.minimum(ub_centered, tr_bounds) orig_l = np.equal(lb_total, lb_centered) orig_u = np.equal(ub_total, ub_centered) tr_l = np.equal(lb_total, -tr_bounds) tr_u = np.equal(ub_total, tr_bounds) return lb_total, ub_total, orig_l, orig_u, tr_l, tr_u def dogleg_step(x, newton_step, g, a, b, tr_bounds, lb, ub): """Find dogleg step in a rectangular region. Returns ------- step : ndarray, shape (n,) Computed dogleg step. bound_hits : ndarray of int, shape (n,) Each component shows whether a corresponding variable hits the initial bound after the step is taken: * 0 - a variable doesn't hit the bound. * -1 - lower bound is hit. * 1 - upper bound is hit. tr_hit : bool Whether the step hit the boundary of the trust-region. """ lb_total, ub_total, orig_l, orig_u, tr_l, tr_u = find_intersection( x, tr_bounds, lb, ub ) bound_hits = np.zeros_like(x, dtype=int) if in_bounds(newton_step, lb_total, ub_total): return newton_step, bound_hits, False to_bounds, _ = step_size_to_bound(np.zeros_like(x), -g, lb_total, ub_total) # The classical dogleg algorithm would check if Cauchy step fits into # the bounds, and just return it constrained version if not. But in a # rectangular trust region it makes sense to try to improve constrained # Cauchy step too. Thus we don't distinguish these two cases. cauchy_step = -minimize_quadratic_1d(a, b, 0, to_bounds)[0] * g step_diff = newton_step - cauchy_step step_size, hits = step_size_to_bound(cauchy_step, step_diff, lb_total, ub_total) bound_hits[(hits < 0) & orig_l] = -1 bound_hits[(hits > 0) & orig_u] = 1 tr_hit = np.any((hits < 0) & tr_l | (hits > 0) & tr_u) return cauchy_step + step_size * step_diff, bound_hits, tr_hit def dogbox(fun, jac, x0, f0, J0, lb, ub, ftol, xtol, gtol, max_nfev, x_scale, loss_function, tr_solver, tr_options, verbose): f = f0 f_true = f.copy() nfev = 1 J = J0 njev = 1 if loss_function is not None: rho = loss_function(f) cost = 0.5 * np.sum(rho[0]) J, f = scale_for_robust_loss_function(J, f, rho) else: cost = 0.5 * np.dot(f, f) g = compute_grad(J, f) jac_scale = isinstance(x_scale, string_types) and x_scale == 'jac' if jac_scale: scale, scale_inv = compute_jac_scale(J) else: scale, scale_inv = x_scale, 1 / x_scale Delta = norm(x0 * scale_inv, ord=np.inf) if Delta == 0: Delta = 1.0 on_bound = np.zeros_like(x0, dtype=int) on_bound[np.equal(x0, lb)] = -1 on_bound[np.equal(x0, ub)] = 1 x = x0 step = np.empty_like(x0) if max_nfev is None: max_nfev = x0.size * 100 termination_status = None iteration = 0 step_norm = None actual_reduction = None if verbose == 2: print_header_nonlinear() while True: active_set = on_bound * g < 0 free_set = ~active_set g_free = g[free_set] g_full = g.copy() g[active_set] = 0 g_norm = norm(g, ord=np.inf) if g_norm < gtol: termination_status = 1 if verbose == 2: print_iteration_nonlinear(iteration, nfev, cost, actual_reduction, step_norm, g_norm) if termination_status is not None or nfev == max_nfev: break x_free = x[free_set] lb_free = lb[free_set] ub_free = ub[free_set] scale_free = scale[free_set] # Compute (Gauss-)Newton and build quadratic model for Cauchy step. 
if tr_solver == 'exact': J_free = J[:, free_set] newton_step = lstsq(J_free, -f)[0] # Coefficients for the quadratic model along the anti-gradient. a, b = build_quadratic_1d(J_free, g_free, -g_free) elif tr_solver == 'lsmr': Jop = aslinearoperator(J) # We compute lsmr step in scaled variables and then # transform back to normal variables, if lsmr would give exact lsq # solution this would be equivalent to not doing any # transformations, but from experience it's better this way. # We pass active_set to make computations as if we selected # the free subset of J columns, but without actually doing any # slicing, which is expensive for sparse matrices and impossible # for LinearOperator. lsmr_op = lsmr_operator(Jop, scale, active_set) newton_step = -lsmr(lsmr_op, f, **tr_options)[0][free_set] newton_step *= scale_free # Components of g for active variables were zeroed, so this call # is correct and equivalent to using J_free and g_free. a, b = build_quadratic_1d(Jop, g, -g) actual_reduction = -1.0 while actual_reduction <= 0 and nfev < max_nfev: tr_bounds = Delta * scale_free step_free, on_bound_free, tr_hit = dogleg_step( x_free, newton_step, g_free, a, b, tr_bounds, lb_free, ub_free) step.fill(0.0) step[free_set] = step_free if tr_solver == 'exact': predicted_reduction = -evaluate_quadratic(J_free, g_free, step_free) elif tr_solver == 'lsmr': predicted_reduction = -evaluate_quadratic(Jop, g, step) x_new = x + step f_new = fun(x_new) nfev += 1 step_h_norm = norm(step * scale_inv, ord=np.inf) if not np.all(np.isfinite(f_new)): Delta = 0.25 * step_h_norm continue # Usual trust-region step quality estimation. if loss_function is not None: cost_new = loss_function(f_new, cost_only=True) else: cost_new = 0.5 * np.dot(f_new, f_new) actual_reduction = cost - cost_new Delta, ratio = update_tr_radius( Delta, actual_reduction, predicted_reduction, step_h_norm, tr_hit ) step_norm = norm(step) termination_status = check_termination( actual_reduction, cost, step_norm, norm(x), ratio, ftol, xtol) if termination_status is not None: break if actual_reduction > 0: on_bound[free_set] = on_bound_free x = x_new # Set variables exactly at the boundary. mask = on_bound == -1 x[mask] = lb[mask] mask = on_bound == 1 x[mask] = ub[mask] f = f_new f_true = f.copy() cost = cost_new J = jac(x, f) njev += 1 if loss_function is not None: rho = loss_function(f) J, f = scale_for_robust_loss_function(J, f, rho) g = compute_grad(J, f) if jac_scale: scale, scale_inv = compute_jac_scale(J, scale_inv) else: step_norm = 0 actual_reduction = 0 iteration += 1 if termination_status is None: termination_status = 0 return OptimizeResult( x=x, cost=cost, fun=f_true, jac=J, grad=g_full, optimality=g_norm, active_mask=on_bound, nfev=nfev, njev=njev, status=termination_status)
mit
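The dogbox docstring above describes the rectangular-trust-region dogleg solver as implemented inside SciPy's least-squares machinery. Below is a minimal usage sketch through the public scipy.optimize.least_squares entry point, which is how this module is normally reached; the residual function, data and bounds are illustrative placeholders, not part of the original file.

import numpy as np
from scipy.optimize import least_squares

# Illustrative exponential-decay fit: residuals of a * exp(b * t) against noisy samples.
t = np.linspace(0, 1, 20)
y = 2.0 * np.exp(-1.3 * t) + 0.01 * np.random.randn(t.size)

def residuals(params):
    a, b = params
    return a * np.exp(b * t) - y

# method='dogbox' selects the solver defined above; the bounds keep a >= 0 and b <= 0,
# so the rectangular trust region is intersected with a rectangular feasible set.
result = least_squares(residuals, x0=[1.0, -1.0],
                       bounds=([0.0, -np.inf], [np.inf, 0.0]),
                       method='dogbox')
print(result.x, result.optimality)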
GertBurger/pygame_cffi
benchmarks/profiling.py
3
1895
import os import time class StopProfiling(Exception): pass class Profile(object): class NoopTimer(object): def __enter__(self): pass def __exit__(self, *args): pass class Timer(object): def __init__(self, profile, identifier): self.profile = profile self.identifier = identifier def __enter__(self): self._start = time.time() def __exit__(self, *args): elapsed = time.time() - self._start self.profile.segments.setdefault(self.identifier, (0.0, 0.0)) total, n = self.profile.segments[self.identifier] self.profile.segments[self.identifier] = ( total + elapsed, n + 1.0, ) def __init__(self, delay=-1, period=10): self.segments = {} self.delay = delay self.created = time.time() self.period = period if delay != -1: self.period += delay def time(self, name): diff = time.time() - self.created if diff > self.period: raise StopProfiling elif diff > self.delay: return Profile.Timer(self, name) else: return Profile.NoopTimer() def __str__(self): s = '' max_key_len = 0 for key in self.segments.keys(): if len(key) > max_key_len: max_key_len = len(key) for key in sorted(self.segments.keys()): val = self.segments[key] total = val[0] n = val[1] average = val[0] / val[1] spaces = ' ' * (max_key_len - len(key)) s += '%s:%s\t%.5f\t%d\t%.8f\n' % (key, spaces, total, n, average) return s if __name__ == '__main__': profile = Profile() with profile.time('test'): time.sleep(1) print profile
lgpl-2.1
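A short, hedged usage sketch for the Profile helper defined in profiling.py above (the import path is assumed; adjust it to wherever the module lives). It illustrates the timing semantics: with delay set, segments are ignored for the first delay seconds, and StopProfiling is raised once roughly delay + period seconds have elapsed.

import time
from profiling import Profile, StopProfiling  # assumed import path for the module above

profile = Profile(delay=1, period=5)  # warm up for 1 s, then record for about 5 s
try:
    while True:
        with profile.time('update'):
            time.sleep(0.005)
        with profile.time('draw'):
            time.sleep(0.010)
except StopProfiling:
    # __str__ prints one line per segment: total time, call count and average.
    print(profile)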
mystique1029/namebench
libnamebench/better_webbrowser.py
175
4191
#!/usr/bin/env python # Copyright 2009 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Wrapper for webbrowser library, to invoke the http handler on win32.""" __author__ = '[email protected] (Thomas Stromberg)' import os.path import subprocess import sys import traceback import webbrowser import util def output(string): print string def create_win32_http_cmd(url): """Create a command-line tuple to launch a web browser for a given URL. Args: url: string Returns: tuple of: (executable, arg1, arg2, ...) At the moment, this ignores all default arguments to the browser. TODO(tstromberg): Properly parse the command-line arguments. """ browser_type = None try: key = _winreg.OpenKey(_winreg.HKEY_CURRENT_USER, 'Software\Classes\http\shell\open\command') browser_type = 'user' except WindowsError: key = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, 'Software\Classes\http\shell\open\command') browser_type = 'machine' except: return False cmd = _winreg.EnumValue(key, 0)[1] # "C:\blah blah\iexplore.exe" -nohome # "C:\blah blah\firefox.exe" -requestPending -osint -url "%1" if '"' in cmd: executable = cmd.split('"')[1] else: executable = cmd.split(' ')[0] if not os.path.exists(executable): output('$ Default HTTP browser does not exist: %s' % executable) return False else: output('$ %s HTTP handler: %s' % (browser_type, executable)) return (executable, url) def open(url): """Opens a URL, overriding the normal webbrowser.open methods for sanity.""" try: webbrowser.open(url, new=1, autoraise=True) # If the user is missing the osascript binary - see # http://code.google.com/p/namebench/issues/detail?id=88 except: output('Failed to open: [%s]: %s' % (url, util.GetLastExceptionString())) if os.path.exists('/usr/bin/open'): try: output('trying open: %s' % url) p = subprocess.Popen(('open', url)) p.wait() except: output('open did not seem to work: %s' % util.GetLastExceptionString()) elif sys.platform[:3] == 'win': try: output('trying default Windows controller: %s' % url) controller = webbrowser.get('windows-default') controller.open_new(url) except: output('WindowsController did not work: %s' % util.GetLastExceptionString()) # *NOTE*: EVIL IMPORT SIDE EFFECTS AHEAD! # # If we are running on Windows, register the WindowsHttpDefault class. if sys.platform[:3] == 'win': import _winreg # We don't want to load this class by default, because Python 2.4 doesn't have BaseBrowser. 
class WindowsHttpDefault(webbrowser.BaseBrowser): """Provide an alternate open class for Windows user, using the http handler.""" def open(self, url, new=0, autoraise=1): command_args = create_win32_http_cmd(url) if not command_args: output('$ Could not find HTTP handler') return False output('command_args:') output(command_args) # Avoid some unicode path issues by moving our current directory old_pwd = os.getcwd() os.chdir('C:\\') try: _unused = subprocess.Popen(command_args) os.chdir(old_pwd) return True except: traceback.print_exc() output('$ Failed to run HTTP handler, trying next browser.') os.chdir(old_pwd) return False webbrowser.register('windows-http', WindowsHttpDefault, update_tryorder=-1)
apache-2.0
shinglyu/servo
tests/wpt/web-platform-tests/tools/pywebsocket/src/test/test_endtoend.py
449
26811
#!/usr/bin/env python # # Copyright 2012, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """End-to-end tests for pywebsocket. Tests standalone.py by default. You can also test mod_pywebsocket hosted on an Apache server by setting _use_external_server to True and modifying _external_server_port to point to the port on which the Apache server is running. """ import logging import os import signal import socket import subprocess import sys import time import unittest import set_sys_path # Update sys.path to locate mod_pywebsocket module. from test import client_for_testing from test import mux_client_for_testing # Special message that tells the echo server to start closing handshake _GOODBYE_MESSAGE = 'Goodbye' _SERVER_WARMUP_IN_SEC = 0.2 # If you want to use external server to run end to end tests, set following # parameters correctly. 
_use_external_server = False _external_server_port = 0 # Test body functions def _echo_check_procedure(client): client.connect() client.send_message('test') client.assert_receive('test') client.send_message('helloworld') client.assert_receive('helloworld') client.send_close() client.assert_receive_close() client.assert_connection_closed() def _echo_check_procedure_with_binary(client): client.connect() client.send_message('binary', binary=True) client.assert_receive('binary', binary=True) client.send_message('\x00\x80\xfe\xff\x00\x80', binary=True) client.assert_receive('\x00\x80\xfe\xff\x00\x80', binary=True) client.send_close() client.assert_receive_close() client.assert_connection_closed() def _echo_check_procedure_with_goodbye(client): client.connect() client.send_message('test') client.assert_receive('test') client.send_message(_GOODBYE_MESSAGE) client.assert_receive(_GOODBYE_MESSAGE) client.assert_receive_close() client.send_close() client.assert_connection_closed() def _echo_check_procedure_with_code_and_reason(client, code, reason): client.connect() client.send_close(code, reason) client.assert_receive_close(code, reason) client.assert_connection_closed() def _unmasked_frame_check_procedure(client): client.connect() client.send_message('test', mask=False) client.assert_receive_close(client_for_testing.STATUS_PROTOCOL_ERROR, '') client.assert_connection_closed() def _mux_echo_check_procedure(mux_client): mux_client.connect() mux_client.send_flow_control(1, 1024) logical_channel_options = client_for_testing.ClientOptions() logical_channel_options.server_host = 'localhost' logical_channel_options.server_port = 80 logical_channel_options.origin = 'http://localhost' logical_channel_options.resource = '/echo' mux_client.add_channel(2, logical_channel_options) mux_client.send_flow_control(2, 1024) mux_client.send_message(2, 'test') mux_client.assert_receive(2, 'test') mux_client.add_channel(3, logical_channel_options) mux_client.send_flow_control(3, 1024) mux_client.send_message(2, 'hello') mux_client.send_message(3, 'world') mux_client.assert_receive(2, 'hello') mux_client.assert_receive(3, 'world') # Don't send close message on channel id 1 so that server-initiated # closing handshake won't occur. mux_client.send_close(2) mux_client.send_close(3) mux_client.assert_receive_close(2) mux_client.assert_receive_close(3) mux_client.send_physical_connection_close() mux_client.assert_physical_connection_receive_close() class EndToEndTestBase(unittest.TestCase): """Base class for end-to-end tests that launch pywebsocket standalone server as a separate process, connect to it using the client_for_testing module, and check if the server behaves correctly by exchanging opening handshake and frames over a TCP connection. """ def setUp(self): self.server_stderr = None self.top_dir = os.path.join(os.path.split(__file__)[0], '..') os.putenv('PYTHONPATH', os.path.pathsep.join(sys.path)) self.standalone_command = os.path.join( self.top_dir, 'mod_pywebsocket', 'standalone.py') self.document_root = os.path.join(self.top_dir, 'example') s = socket.socket() s.bind(('localhost', 0)) (_, self.test_port) = s.getsockname() s.close() self._options = client_for_testing.ClientOptions() self._options.server_host = 'localhost' self._options.origin = 'http://localhost' self._options.resource = '/echo' # TODO(toyoshim): Eliminate launching a standalone server on using # external server. 
if _use_external_server: self._options.server_port = _external_server_port else: self._options.server_port = self.test_port # TODO(tyoshino): Use tearDown to kill the server. def _run_python_command(self, commandline, stdout=None, stderr=None): return subprocess.Popen([sys.executable] + commandline, close_fds=True, stdout=stdout, stderr=stderr) def _run_server(self): args = [self.standalone_command, '-H', 'localhost', '-V', 'localhost', '-p', str(self.test_port), '-P', str(self.test_port), '-d', self.document_root] # Inherit the level set to the root logger by test runner. root_logger = logging.getLogger() log_level = root_logger.getEffectiveLevel() if log_level != logging.NOTSET: args.append('--log-level') args.append(logging.getLevelName(log_level).lower()) return self._run_python_command(args, stderr=self.server_stderr) def _kill_process(self, pid): if sys.platform in ('win32', 'cygwin'): subprocess.call( ('taskkill.exe', '/f', '/pid', str(pid)), close_fds=True) else: os.kill(pid, signal.SIGKILL) class EndToEndHyBiTest(EndToEndTestBase): def setUp(self): EndToEndTestBase.setUp(self) def _run_test_with_client_options(self, test_function, options): server = self._run_server() try: # TODO(tyoshino): add some logic to poll the server until it # becomes ready time.sleep(_SERVER_WARMUP_IN_SEC) client = client_for_testing.create_client(options) try: test_function(client) finally: client.close_socket() finally: self._kill_process(server.pid) def _run_test(self, test_function): self._run_test_with_client_options(test_function, self._options) def _run_deflate_frame_test(self, test_function): server = self._run_server() try: time.sleep(_SERVER_WARMUP_IN_SEC) self._options.enable_deflate_frame() client = client_for_testing.create_client(self._options) try: test_function(client) finally: client.close_socket() finally: self._kill_process(server.pid) def _run_permessage_deflate_test( self, offer, response_checker, test_function): server = self._run_server() try: time.sleep(_SERVER_WARMUP_IN_SEC) self._options.extensions += offer self._options.check_permessage_deflate = response_checker client = client_for_testing.create_client(self._options) try: client.connect() if test_function is not None: test_function(client) client.assert_connection_closed() finally: client.close_socket() finally: self._kill_process(server.pid) def _run_close_with_code_and_reason_test(self, test_function, code, reason): server = self._run_server() try: time.sleep(_SERVER_WARMUP_IN_SEC) client = client_for_testing.create_client(self._options) try: test_function(client, code, reason) finally: client.close_socket() finally: self._kill_process(server.pid) def _run_http_fallback_test(self, options, status): server = self._run_server() try: time.sleep(_SERVER_WARMUP_IN_SEC) client = client_for_testing.create_client(options) try: client.connect() self.fail('Could not catch HttpStatusException') except client_for_testing.HttpStatusException, e: self.assertEqual(status, e.status) except Exception, e: self.fail('Catch unexpected exception') finally: client.close_socket() finally: self._kill_process(server.pid) def _run_mux_test(self, test_function): server = self._run_server() try: time.sleep(_SERVER_WARMUP_IN_SEC) client = mux_client_for_testing.MuxClient(self._options) try: test_function(client) finally: client.close_socket() finally: self._kill_process(server.pid) def test_echo(self): self._run_test(_echo_check_procedure) def test_echo_binary(self): self._run_test(_echo_check_procedure_with_binary) def test_echo_server_close(self): 
self._run_test(_echo_check_procedure_with_goodbye) def test_unmasked_frame(self): self._run_test(_unmasked_frame_check_procedure) def test_echo_deflate_frame(self): self._run_deflate_frame_test(_echo_check_procedure) def test_echo_deflate_frame_server_close(self): self._run_deflate_frame_test( _echo_check_procedure_with_goodbye) def test_echo_permessage_deflate(self): def test_function(client): # From the examples in the spec. compressed_hello = '\xf2\x48\xcd\xc9\xc9\x07\x00' client._stream.send_data( compressed_hello, client_for_testing.OPCODE_TEXT, rsv1=1) client._stream.assert_receive_binary( compressed_hello, opcode=client_for_testing.OPCODE_TEXT, rsv1=1) client.send_close() client.assert_receive_close() def response_checker(parameter): self.assertEquals('permessage-deflate', parameter.name()) self.assertEquals([], parameter.get_parameters()) self._run_permessage_deflate_test( ['permessage-deflate'], response_checker, test_function) def test_echo_permessage_deflate_two_frames(self): def test_function(client): # From the examples in the spec. client._stream.send_data( '\xf2\x48\xcd', client_for_testing.OPCODE_TEXT, end=False, rsv1=1) client._stream.send_data( '\xc9\xc9\x07\x00', client_for_testing.OPCODE_TEXT) client._stream.assert_receive_binary( '\xf2\x48\xcd\xc9\xc9\x07\x00', opcode=client_for_testing.OPCODE_TEXT, rsv1=1) client.send_close() client.assert_receive_close() def response_checker(parameter): self.assertEquals('permessage-deflate', parameter.name()) self.assertEquals([], parameter.get_parameters()) self._run_permessage_deflate_test( ['permessage-deflate'], response_checker, test_function) def test_echo_permessage_deflate_two_messages(self): def test_function(client): # From the examples in the spec. client._stream.send_data( '\xf2\x48\xcd\xc9\xc9\x07\x00', client_for_testing.OPCODE_TEXT, rsv1=1) client._stream.send_data( '\xf2\x00\x11\x00\x00', client_for_testing.OPCODE_TEXT, rsv1=1) client._stream.assert_receive_binary( '\xf2\x48\xcd\xc9\xc9\x07\x00', opcode=client_for_testing.OPCODE_TEXT, rsv1=1) client._stream.assert_receive_binary( '\xf2\x00\x11\x00\x00', opcode=client_for_testing.OPCODE_TEXT, rsv1=1) client.send_close() client.assert_receive_close() def response_checker(parameter): self.assertEquals('permessage-deflate', parameter.name()) self.assertEquals([], parameter.get_parameters()) self._run_permessage_deflate_test( ['permessage-deflate'], response_checker, test_function) def test_echo_permessage_deflate_two_msgs_server_no_context_takeover(self): def test_function(client): # From the examples in the spec. client._stream.send_data( '\xf2\x48\xcd\xc9\xc9\x07\x00', client_for_testing.OPCODE_TEXT, rsv1=1) client._stream.send_data( '\xf2\x00\x11\x00\x00', client_for_testing.OPCODE_TEXT, rsv1=1) client._stream.assert_receive_binary( '\xf2\x48\xcd\xc9\xc9\x07\x00', opcode=client_for_testing.OPCODE_TEXT, rsv1=1) client._stream.assert_receive_binary( '\xf2\x48\xcd\xc9\xc9\x07\x00', opcode=client_for_testing.OPCODE_TEXT, rsv1=1) client.send_close() client.assert_receive_close() def response_checker(parameter): self.assertEquals('permessage-deflate', parameter.name()) self.assertEquals([('server_no_context_takeover', None)], parameter.get_parameters()) self._run_permessage_deflate_test( ['permessage-deflate; server_no_context_takeover'], response_checker, test_function) def test_echo_permessage_deflate_preference(self): def test_function(client): # From the examples in the spec. 
compressed_hello = '\xf2\x48\xcd\xc9\xc9\x07\x00' client._stream.send_data( compressed_hello, client_for_testing.OPCODE_TEXT, rsv1=1) client._stream.assert_receive_binary( compressed_hello, opcode=client_for_testing.OPCODE_TEXT, rsv1=1) client.send_close() client.assert_receive_close() def response_checker(parameter): self.assertEquals('permessage-deflate', parameter.name()) self.assertEquals([], parameter.get_parameters()) self._run_permessage_deflate_test( ['permessage-deflate', 'deflate-frame'], response_checker, test_function) def test_echo_permessage_deflate_with_parameters(self): def test_function(client): # From the examples in the spec. compressed_hello = '\xf2\x48\xcd\xc9\xc9\x07\x00' client._stream.send_data( compressed_hello, client_for_testing.OPCODE_TEXT, rsv1=1) client._stream.assert_receive_binary( compressed_hello, opcode=client_for_testing.OPCODE_TEXT, rsv1=1) client.send_close() client.assert_receive_close() def response_checker(parameter): self.assertEquals('permessage-deflate', parameter.name()) self.assertEquals([('server_max_window_bits', '10'), ('server_no_context_takeover', None)], parameter.get_parameters()) self._run_permessage_deflate_test( ['permessage-deflate; server_max_window_bits=10; ' 'server_no_context_takeover'], response_checker, test_function) def test_echo_permessage_deflate_with_bad_server_max_window_bits(self): def test_function(client): client.send_close() client.assert_receive_close() def response_checker(parameter): raise Exception('Unexpected acceptance of permessage-deflate') self._run_permessage_deflate_test( ['permessage-deflate; server_max_window_bits=3000000'], response_checker, test_function) def test_echo_permessage_deflate_with_bad_server_max_window_bits(self): def test_function(client): client.send_close() client.assert_receive_close() def response_checker(parameter): raise Exception('Unexpected acceptance of permessage-deflate') self._run_permessage_deflate_test( ['permessage-deflate; server_max_window_bits=3000000'], response_checker, test_function) def test_echo_permessage_deflate_with_undefined_parameter(self): def test_function(client): client.send_close() client.assert_receive_close() def response_checker(parameter): raise Exception('Unexpected acceptance of permessage-deflate') self._run_permessage_deflate_test( ['permessage-deflate; foo=bar'], response_checker, test_function) def test_echo_close_with_code_and_reason(self): self._options.resource = '/close' self._run_close_with_code_and_reason_test( _echo_check_procedure_with_code_and_reason, 3333, 'sunsunsunsun') def test_echo_close_with_empty_body(self): self._options.resource = '/close' self._run_close_with_code_and_reason_test( _echo_check_procedure_with_code_and_reason, None, '') def test_mux_echo(self): self._run_mux_test(_mux_echo_check_procedure) def test_close_on_protocol_error(self): """Tests that the server sends a close frame with protocol error status code when the client sends data with some protocol error. """ def test_function(client): client.connect() # Intermediate frame without any preceding start of fragmentation # frame. client.send_frame_of_arbitrary_bytes('\x80\x80', '') client.assert_receive_close( client_for_testing.STATUS_PROTOCOL_ERROR) self._run_test(test_function) def test_close_on_unsupported_frame(self): """Tests that the server sends a close frame with unsupported operation status code when the client sends data asking some operation that is not supported by the server. 
""" def test_function(client): client.connect() # Text frame with RSV3 bit raised. client.send_frame_of_arbitrary_bytes('\x91\x80', '') client.assert_receive_close( client_for_testing.STATUS_UNSUPPORTED_DATA) self._run_test(test_function) def test_close_on_invalid_frame(self): """Tests that the server sends a close frame with invalid frame payload data status code when the client sends an invalid frame like containing invalid UTF-8 character. """ def test_function(client): client.connect() # Text frame with invalid UTF-8 string. client.send_message('\x80', raw=True) client.assert_receive_close( client_for_testing.STATUS_INVALID_FRAME_PAYLOAD_DATA) self._run_test(test_function) def test_close_on_internal_endpoint_error(self): """Tests that the server sends a close frame with internal endpoint error status code when the handler does bad operation. """ self._options.resource = '/internal_error' def test_function(client): client.connect() client.assert_receive_close( client_for_testing.STATUS_INTERNAL_ENDPOINT_ERROR) self._run_test(test_function) # TODO(toyoshim): Add tests to verify invalid absolute uri handling like # host unmatch, port unmatch and invalid port description (':' without port # number). def test_absolute_uri(self): """Tests absolute uri request.""" options = self._options options.resource = 'ws://localhost:%d/echo' % options.server_port self._run_test_with_client_options(_echo_check_procedure, options) def test_origin_check(self): """Tests http fallback on origin check fail.""" options = self._options options.resource = '/origin_check' # Server shows warning message for http 403 fallback. This warning # message is confusing. Following pipe disposes warning messages. self.server_stderr = subprocess.PIPE self._run_http_fallback_test(options, 403) def test_version_check(self): """Tests http fallback on version check fail.""" options = self._options options.version = 99 self._run_http_fallback_test(options, 400) class EndToEndHyBi00Test(EndToEndTestBase): def setUp(self): EndToEndTestBase.setUp(self) def _run_test(self, test_function): server = self._run_server() try: time.sleep(_SERVER_WARMUP_IN_SEC) client = client_for_testing.create_client_hybi00(self._options) try: test_function(client) finally: client.close_socket() finally: self._kill_process(server.pid) def test_echo(self): self._run_test(_echo_check_procedure) def test_echo_server_close(self): self._run_test(_echo_check_procedure_with_goodbye) class EndToEndTestWithEchoClient(EndToEndTestBase): def setUp(self): EndToEndTestBase.setUp(self) def _check_example_echo_client_result( self, expected, stdoutdata, stderrdata): actual = stdoutdata.decode("utf-8") if actual != expected: raise Exception('Unexpected result on example echo client: ' '%r (expected) vs %r (actual)' % (expected, actual)) if stderrdata is not None: raise Exception('Unexpected error message on example echo ' 'client: %r' % stderrdata) def test_example_echo_client(self): """Tests that the echo_client.py example can talk with the server.""" server = self._run_server() try: time.sleep(_SERVER_WARMUP_IN_SEC) client_command = os.path.join( self.top_dir, 'example', 'echo_client.py') # Expected output for the default messages. 
default_expectation = ('Send: Hello\n' 'Recv: Hello\n' u'Send: \u65e5\u672c\n' u'Recv: \u65e5\u672c\n' 'Send close\n' 'Recv ack\n') args = [client_command, '-p', str(self._options.server_port)] client = self._run_python_command(args, stdout=subprocess.PIPE) stdoutdata, stderrdata = client.communicate() self._check_example_echo_client_result( default_expectation, stdoutdata, stderrdata) # Process a big message for which extended payload length is used. # To handle extended payload length, ws_version attribute will be # accessed. This test checks that ws_version is correctly set. big_message = 'a' * 1024 args = [client_command, '-p', str(self._options.server_port), '-m', big_message] client = self._run_python_command(args, stdout=subprocess.PIPE) stdoutdata, stderrdata = client.communicate() expected = ('Send: %s\nRecv: %s\nSend close\nRecv ack\n' % (big_message, big_message)) self._check_example_echo_client_result( expected, stdoutdata, stderrdata) # Test the permessage-deflate extension. args = [client_command, '-p', str(self._options.server_port), '--use_permessage_deflate'] client = self._run_python_command(args, stdout=subprocess.PIPE) stdoutdata, stderrdata = client.communicate() self._check_example_echo_client_result( default_expectation, stdoutdata, stderrdata) finally: self._kill_process(server.pid) if __name__ == '__main__': unittest.main() # vi:sts=4 sw=4 et
mpl-2.0
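The module docstring above notes that the end-to-end suite can also be pointed at mod_pywebsocket hosted on an external Apache server by flipping two module globals. A hedged sketch of that, with a placeholder port and an assumed import path for the file above:

import unittest
import test_endtoend  # assumed import path for the file above

test_endtoend._use_external_server = True
test_endtoend._external_server_port = 8080  # placeholder: port of the Apache-hosted server

unittest.main(module=test_endtoend, argv=['test_endtoend'], exit=False)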
dhorelik/django-cms
cms/tests/test_signals.py
23
4283
# -*- coding: utf-8 -*- from __future__ import with_statement from contextlib import contextmanager from django.conf import settings from django.contrib.auth import get_user_model from django.test import TestCase from django.test.utils import override_settings from cms.api import create_page from cms.models import UrlconfRevision from cms.signals import urls_need_reloading from cms.test_utils.testcases import CMSTestCase APP_NAME = 'SampleApp' class SignalTester(object): def __init__(self): self.call_count = 0 self.calls = [] def __call__(self, *args, **kwargs): self.call_count += 1 self.calls.append((args, kwargs)) @contextmanager def signal_tester(signal): env = SignalTester() signal.connect(env, weak=True) try: yield env finally: signal.disconnect(env, weak=True) class SignalTests(TestCase): def test_urls_need_reloading_signal_create(self): with signal_tester(urls_need_reloading) as env: self.client.get('/') self.assertEqual(env.call_count, 0) create_page( "apphooked-page", "nav_playground.html", "en", published=True, apphook="SampleApp", apphook_namespace="test" ) self.client.get('/') self.assertEqual(env.call_count, 1) def test_urls_need_reloading_signal_delete(self): with signal_tester(urls_need_reloading) as env: self.client.get('/') self.assertEqual(env.call_count, 0) page = create_page( "apphooked-page", "nav_playground.html", "en", published=True, apphook="SampleApp", apphook_namespace="test" ) page.delete() self.client.get('/') self.assertEqual(env.call_count, 1) def test_urls_need_reloading_signal_change_slug(self): with signal_tester(urls_need_reloading) as env: self.assertEqual(env.call_count, 0) page = create_page( "apphooked-page", "nav_playground.html", "en", published=True, apphook="SampleApp", apphook_namespace="test" ) self.client.get('/') self.assertEqual(env.call_count, 1) title = page.title_set.get(language="en") title.slug += 'test' title.save() page.publish('en') self.client.get('/') self.assertEqual(env.call_count, 2) @override_settings( MIDDLEWARE_CLASSES=[ 'cms.middleware.utils.ApphookReloadMiddleware' ] + settings.MIDDLEWARE_CLASSES, ) class ApphooksReloadTests(CMSTestCase): def test_urls_reloaded(self): """ Tests that URLs are automatically reloaded when the ApphookReload middleware is installed. """ # # Sets up an apphook'ed page, but does not yet publish it. # superuser = get_user_model().objects.create_superuser( 'admin', '[email protected]', 'admin') page = create_page("home", "nav_playground.html", "en", created_by=superuser) page.publish('en') app_page = create_page("app_page", "nav_playground.html", "en", created_by=superuser, parent=page, published=False, apphook="SampleApp") self.client.get('/') # Required to invoke the middleware # # Gets the current urls revision for testing against later. # current_revision, _ = UrlconfRevision.get_or_create_revision() # # Publishes the apphook. This is one of many ways to trigger the # firing of the signal. The tests above test some of the other ways # already. # app_page.publish('en') self.client.get('/') # Required to invoke the middleware # And, this should result in a the updating of the UrlconfRevision new_revision, _ = UrlconfRevision.get_or_create_revision() self.assertNotEquals(current_revision, new_revision)
bsd-3-clause
rhyolight/nupic
examples/opf/experiments/anomaly/spatial/2field_few_skewed/description.py
50
15806
# ---------------------------------------------------------------------- # Numenta Platform for Intelligent Computing (NuPIC) # Copyright (C) 2013, Numenta, Inc. Unless you have an agreement # with Numenta, Inc., for a separate license for this software code, the # following terms and conditions apply: # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero Public License version 3 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # See the GNU Affero Public License for more details. # # You should have received a copy of the GNU Affero Public License # along with this program. If not, see http://www.gnu.org/licenses. # # http://numenta.org/licenses/ # ---------------------------------------------------------------------- """ Template file used by the OPF Experiment Generator to generate the actual description.py file by replacing $XXXXXXXX tokens with desired values. This description.py file was generated by: '~/nupic/eng/lib/python2.6/site-packages/nupic/frameworks/opf/expGenerator/experiment_generator.py' """ from nupic.frameworks.opf.exp_description_api import ExperimentDescriptionAPI from nupic.frameworks.opf.exp_description_helpers import ( updateConfigFromSubConfig, applyValueGettersToContainer, DeferredDictLookup) from nupic.frameworks.opf.htm_prediction_model_callbacks import * from nupic.frameworks.opf.metrics import MetricSpec from nupic.frameworks.opf.opf_utils import (InferenceType, InferenceElement) from nupic.support import aggregationDivide from nupic.frameworks.opf.opf_task_driver import ( IterationPhaseSpecLearnOnly, IterationPhaseSpecInferOnly, IterationPhaseSpecLearnAndInfer) # Model Configuration Dictionary: # # Define the model parameters and adjust for any modifications if imported # from a sub-experiment. # # These fields might be modified by a sub-experiment; this dict is passed # between the sub-experiment and base experiment # # # NOTE: Use of DEFERRED VALUE-GETTERs: dictionary fields and list elements # within the config dictionary may be assigned futures derived from the # ValueGetterBase class, such as DeferredDictLookup. # This facility is particularly handy for enabling substitution of values in # the config dictionary from other values in the config dictionary, which is # needed by permutation.py-based experiments. These values will be resolved # during the call to applyValueGettersToContainer(), # which we call after the base experiment's config dictionary is updated from # the sub-experiment. See ValueGetterBase and # DeferredDictLookup for more details about value-getters. # # For each custom encoder parameter to be exposed to the sub-experiment/ # permutation overrides, define a variable in this section, using key names # beginning with a single underscore character to avoid collisions with # pre-defined keys (e.g., _dsEncoderFieldName2_N). # # Example: # config = dict( # _dsEncoderFieldName2_N = 70, # _dsEncoderFieldName2_W = 5, # dsEncoderSchema = [ # base=dict( # fieldname='Name2', type='ScalarEncoder', # name='Name2', minval=0, maxval=270, clipInput=True, # n=DeferredDictLookup('_dsEncoderFieldName2_N'), # w=DeferredDictLookup('_dsEncoderFieldName2_W')), # ], # ) # updateConfigFromSubConfig(config) # applyValueGettersToContainer(config) config = { # Type of model that the rest of these parameters apply to. 
'model': "HTMPrediction", # Version that specifies the format of the config. 'version': 1, # Intermediate variables used to compute fields in modelParams and also # referenced from the control section. 'aggregationInfo': { 'fields': [ ('numericFieldNameA', 'mean'), ('numericFieldNameB', 'sum'), ('categoryFieldNameC', 'first')], 'hours': 0}, 'predictAheadTime': None, # Model parameter dictionary. 'modelParams': { # The type of inference that this model will perform 'inferenceType': 'NontemporalAnomaly', 'sensorParams': { # Sensor diagnostic output verbosity control; # if > 0: sensor region will print out on screen what it's sensing # at each step 0: silent; >=1: some info; >=2: more info; # >=3: even more info (see compute() in py/regions/RecordSensor.py) 'verbosity' : 0, # Example: # dsEncoderSchema = [ # DeferredDictLookup('__field_name_encoder'), # ], # # (value generated from DS_ENCODER_SCHEMA) 'encoders': { 'f0': dict(fieldname='f0', n=100, name='f0', type='SDRCategoryEncoder', w=21), 'f1': dict(fieldname='f1', n=100, name='f1', type='SDRCategoryEncoder', w=21), }, # A dictionary specifying the period for automatically-generated # resets from a RecordSensor; # # None = disable automatically-generated resets (also disabled if # all of the specified values evaluate to 0). # Valid keys is the desired combination of the following: # days, hours, minutes, seconds, milliseconds, microseconds, weeks # # Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12), # # (value generated from SENSOR_AUTO_RESET) 'sensorAutoReset' : None, }, 'spEnable': True, 'spParams': { # SP diagnostic output verbosity control; # 0: silent; >=1: some info; >=2: more info; 'spVerbosity' : 0, 'globalInhibition': 1, # Number of cell columns in the cortical region (same number for # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, 'inputWidth': 0, # SP inhibition control (absolute value); # Maximum number of active columns in the SP region's output (when # there are more, the weaker ones are suppressed) 'numActiveColumnsPerInhArea': 40, 'seed': 1956, # potentialPct # What percent of the columns's receptive field is available # for potential synapses. At initialization time, we will # choose potentialPct * (2*potentialRadius+1)^2 'potentialPct': 0.5, # The default connected threshold. Any synapse whose # permanence value is above the connected threshold is # a "connected synapse", meaning it can contribute to the # cell's firing. Typical value is 0.10. Cells whose activity # level before inhibition falls below minDutyCycleBeforeInh # will have their own internal synPermConnectedCell # threshold set below this default value. # (This concept applies to both SP and TM and so 'cells' # is correct here as opposed to 'columns') 'synPermConnected': 0.1, 'synPermActiveInc': 0.1, 'synPermInactiveDec': 0.01, }, # Controls whether TM is enabled or disabled; # TM is necessary for making temporal predictions, such as predicting # the next inputs. Without TM, the model is only capable of # reconstructing missing sensor inputs (via SP). 'tmEnable' : True, 'tmParams': { # TM diagnostic output verbosity control; # 0: silent; [1..6]: increasing levels of verbosity # (see verbosity in nupic/trunk/py/nupic/research/backtracking_tm.py and backtracking_tm_cpp.py) 'verbosity': 0, # Number of cell columns in the cortical region (same number for # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, # The number of cells (i.e., states), allocated per column. 
'cellsPerColumn': 32, 'inputWidth': 2048, 'seed': 1960, # Temporal Pooler implementation selector (see _getTPClass in # CLARegion.py). 'temporalImp': 'cpp', # New Synapse formation count # NOTE: If None, use spNumActivePerInhArea # # TODO: need better explanation 'newSynapseCount': 20, # Maximum number of synapses per segment # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # # TODO: for Ron: once the appropriate value is placed in TM # constructor, see if we should eliminate this parameter from # description.py. 'maxSynapsesPerSegment': 32, # Maximum number of segments per cell # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # # TODO: for Ron: once the appropriate value is placed in TM # constructor, see if we should eliminate this parameter from # description.py. 'maxSegmentsPerCell': 128, # Initial Permanence # TODO: need better explanation 'initialPerm': 0.21, # Permanence Increment 'permanenceInc': 0.1, # Permanence Decrement # If set to None, will automatically default to tpPermanenceInc # value. 'permanenceDec' : 0.1, 'globalDecay': 0.0, 'maxAge': 0, # Minimum number of active synapses for a segment to be considered # during search for the best-matching segments. # None=use default # Replaces: tpMinThreshold 'minThreshold': 12, # Segment activation threshold. # A segment is active if it has >= tpSegmentActivationThreshold # connected synapses that are active due to infActiveState # None=use default # Replaces: tpActivationThreshold 'activationThreshold': 16, 'outputType': 'normal', # "Pay Attention Mode" length. This tells the TM how many new # elements to append to the end of a learned sequence at a time. # Smaller values are better for datasets with short sequences, # higher values are better for datasets with long sequences. 'pamLength': 1, }, 'clParams': { # Classifier implementation selection. 'implementation': 'py', 'regionName' : 'SDRClassifierRegion', # Classifier diagnostic output verbosity control; # 0: silent; [1..6]: increasing levels of verbosity 'verbosity' : 0, # This controls how fast the classifier learns/forgets. Higher values # make it adapt faster and forget older patterns faster. 'alpha': 0.001, # This is set after the call to updateConfigFromSubConfig and is # computed from the aggregationInfo and predictAheadTime. 'steps': '1', }, 'trainSPNetOnlyIfRequested': False, }, } # end of config dictionary # Adjust base config dictionary for any modifications if imported from a # sub-experiment updateConfigFromSubConfig(config) # Compute predictionSteps based on the predictAheadTime and the aggregation # period, which may be permuted over. if config['predictAheadTime'] is not None: predictionSteps = int(round(aggregationDivide( config['predictAheadTime'], config['aggregationInfo']))) assert (predictionSteps >= 1) config['modelParams']['clParams']['steps'] = str(predictionSteps) # Adjust config by applying ValueGetterBase-derived # futures. NOTE: this MUST be called after updateConfigFromSubConfig() in order # to support value-getter-based substitutions from the sub-experiment (if any) applyValueGettersToContainer(config) # [optional] A sequence of one or more tasks that describe what to do with the # model. Each task consists of a task label, an input spec., iteration count, # and a task-control spec per opfTaskSchema.json # # NOTE: The tasks are intended for OPF clients that make use of OPFTaskDriver. # Clients that interact with OPFExperiment directly do not make use of # the tasks specification. 
# control = dict( environment='opfExperiment', tasks = [ { # Task label; this label string may be used for diagnostic logging and for # constructing filenames or directory pathnames for task-specific files, etc. 'taskLabel' : "Anomaly", # Input stream specification per py/nupic/cluster/database/StreamDef.json. # 'dataset' : { 'info': 'test_NoProviders', 'version': 1, 'streams': [ { 'columns': ['*'], 'info': 'my simple dataset', 'source': 'file://'+os.path.join(os.path.dirname(__file__), 'data.csv'), } ], # TODO: Aggregation is not supported yet by run_opf_experiment.py #'aggregation' : config['aggregationInfo'] }, # Iteration count: maximum number of iterations. Each iteration corresponds # to one record from the (possibly aggregated) dataset. The task is # terminated when either number of iterations reaches iterationCount or # all records in the (possibly aggregated) database have been processed, # whichever occurs first. # # iterationCount of -1 = iterate over the entire dataset 'iterationCount' : -1, # Task Control parameters for OPFTaskDriver (per opfTaskControlSchema.json) 'taskControl' : { # Iteration cycle list consisting of opf_task_driver.IterationPhaseSpecXXXXX # instances. 'iterationCycle' : [ #IterationPhaseSpecLearnOnly(1000), IterationPhaseSpecLearnAndInfer(1000, inferenceArgs=None), #IterationPhaseSpecInferOnly(10, inferenceArgs=None), ], 'metrics' : [ ], # Logged Metrics: A sequence of regular expressions that specify which of # the metrics from the Inference Specifications section MUST be logged for # every prediction. The regex's correspond to the automatically generated # metric labels. This is similar to the way the optimization metric is # specified in permutations.py. 'loggedMetrics': ['.*nupicScore.*'], # Callbacks for experimentation/research (optional) 'callbacks' : { # Callbacks to be called at the beginning of a task, before model iterations. # Signature: callback(<reference to OPFExperiment>); returns nothing # 'setup' : [htmPredictionModelControlEnableSPLearningCb, htmPredictionModelControlEnableTPLearningCb], # 'setup' : [htmPredictionModelControlDisableTPLearningCb], 'setup' : [], # Callbacks to be called after every learning/inference iteration # Signature: callback(<reference to OPFExperiment>); returns nothing 'postIter' : [], # Callbacks to be called when the experiment task is finished # Signature: callback(<reference to OPFExperiment>); returns nothing 'finish' : [] } } # End of taskControl }, # End of task ] ) descriptionInterface = ExperimentDescriptionAPI(modelConfig=config, control=control)
agpl-3.0
nkgilley/home-assistant
homeassistant/components/lyft/sensor.py
16
8962
"""Support for the Lyft API.""" from datetime import timedelta import logging from lyft_rides.auth import ClientCredentialGrant from lyft_rides.client import LyftRidesClient from lyft_rides.errors import APIError import voluptuous as vol from homeassistant.components.sensor import PLATFORM_SCHEMA from homeassistant.const import CONF_CLIENT_ID, CONF_CLIENT_SECRET, TIME_MINUTES import homeassistant.helpers.config_validation as cv from homeassistant.helpers.entity import Entity from homeassistant.util import Throttle _LOGGER = logging.getLogger(__name__) CONF_END_LATITUDE = "end_latitude" CONF_END_LONGITUDE = "end_longitude" CONF_PRODUCT_IDS = "product_ids" CONF_START_LATITUDE = "start_latitude" CONF_START_LONGITUDE = "start_longitude" ICON = "mdi:taxi" MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=60) PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( { vol.Required(CONF_CLIENT_ID): cv.string, vol.Required(CONF_CLIENT_SECRET): cv.string, vol.Optional(CONF_START_LATITUDE): cv.latitude, vol.Optional(CONF_START_LONGITUDE): cv.longitude, vol.Optional(CONF_END_LATITUDE): cv.latitude, vol.Optional(CONF_END_LONGITUDE): cv.longitude, vol.Optional(CONF_PRODUCT_IDS): vol.All(cv.ensure_list, [cv.string]), } ) def setup_platform(hass, config, add_entities, discovery_info=None): """Set up the Lyft sensor.""" auth_flow = ClientCredentialGrant( client_id=config.get(CONF_CLIENT_ID), client_secret=config.get(CONF_CLIENT_SECRET), scopes="public", is_sandbox_mode=False, ) try: session = auth_flow.get_session() timeandpriceest = LyftEstimate( session, config.get(CONF_START_LATITUDE, hass.config.latitude), config.get(CONF_START_LONGITUDE, hass.config.longitude), config.get(CONF_END_LATITUDE), config.get(CONF_END_LONGITUDE), ) timeandpriceest.fetch_data() except APIError as exc: _LOGGER.error("Error setting up Lyft platform: %s", exc) return False wanted_product_ids = config.get(CONF_PRODUCT_IDS) dev = [] for product_id, product in timeandpriceest.products.items(): if (wanted_product_ids is not None) and (product_id not in wanted_product_ids): continue dev.append(LyftSensor("time", timeandpriceest, product_id, product)) if product.get("estimate") is not None: dev.append(LyftSensor("price", timeandpriceest, product_id, product)) add_entities(dev, True) class LyftSensor(Entity): """Implementation of an Lyft sensor.""" def __init__(self, sensorType, products, product_id, product): """Initialize the Lyft sensor.""" self.data = products self._product_id = product_id self._product = product self._sensortype = sensorType self._name = f"{self._product['display_name']} {self._sensortype}" if "lyft" not in self._name.lower(): self._name = f"Lyft{self._name}" if self._sensortype == "time": self._unit_of_measurement = TIME_MINUTES elif self._sensortype == "price": estimate = self._product["estimate"] if estimate is not None: self._unit_of_measurement = estimate.get("currency") self._state = None @property def name(self): """Return the name of the sensor.""" return self._name @property def state(self): """Return the state of the sensor.""" return self._state @property def unit_of_measurement(self): """Return the unit of measurement of this entity, if any.""" return self._unit_of_measurement @property def device_state_attributes(self): """Return the state attributes.""" params = { "Product ID": self._product["ride_type"], "Product display name": self._product["display_name"], "Vehicle Capacity": self._product["seats"], } if self._product.get("pricing_details") is not None: pricing_details = self._product["pricing_details"] params["Base 
price"] = pricing_details.get("base_charge") params["Cancellation fee"] = pricing_details.get("cancel_penalty_amount") params["Minimum price"] = pricing_details.get("cost_minimum") params["Cost per mile"] = pricing_details.get("cost_per_mile") params["Cost per minute"] = pricing_details.get("cost_per_minute") params["Price currency code"] = pricing_details.get("currency") params["Service fee"] = pricing_details.get("trust_and_service") if self._product.get("estimate") is not None: estimate = self._product["estimate"] params["Trip distance (in miles)"] = estimate.get( "estimated_distance_miles" ) params["High price estimate (in cents)"] = estimate.get( "estimated_cost_cents_max" ) params["Low price estimate (in cents)"] = estimate.get( "estimated_cost_cents_min" ) params["Trip duration (in seconds)"] = estimate.get( "estimated_duration_seconds" ) params["Prime Time percentage"] = estimate.get("primetime_percentage") if self._product.get("eta") is not None: eta = self._product["eta"] params["Pickup time estimate (in seconds)"] = eta.get("eta_seconds") return {k: v for k, v in params.items() if v is not None} @property def icon(self): """Icon to use in the frontend, if any.""" return ICON def update(self): """Get the latest data from the Lyft API and update the states.""" self.data.update() try: self._product = self.data.products[self._product_id] except KeyError: return self._state = None if self._sensortype == "time": eta = self._product["eta"] if (eta is not None) and (eta.get("is_valid_estimate")): time_estimate = eta.get("eta_seconds") if time_estimate is None: return self._state = int(time_estimate / 60) elif self._sensortype == "price": estimate = self._product["estimate"] if (estimate is not None) and estimate.get("is_valid_estimate"): self._state = ( int( ( estimate.get("estimated_cost_cents_min", 0) + estimate.get("estimated_cost_cents_max", 0) ) / 2 ) / 100 ) class LyftEstimate: """The class for handling the time and price estimate.""" def __init__( self, session, start_latitude, start_longitude, end_latitude=None, end_longitude=None, ): """Initialize the LyftEstimate object.""" self._session = session self.start_latitude = start_latitude self.start_longitude = start_longitude self.end_latitude = end_latitude self.end_longitude = end_longitude self.products = None @Throttle(MIN_TIME_BETWEEN_UPDATES) def update(self): """Get the latest product info and estimates from the Lyft API.""" try: self.fetch_data() except APIError as exc: _LOGGER.error("Error fetching Lyft data: %s", exc) def fetch_data(self): """Get the latest product info and estimates from the Lyft API.""" client = LyftRidesClient(self._session) self.products = {} products_response = client.get_ride_types( self.start_latitude, self.start_longitude ) products = products_response.json.get("ride_types") for product in products: self.products[product["ride_type"]] = product if self.end_latitude is not None and self.end_longitude is not None: price_response = client.get_cost_estimates( self.start_latitude, self.start_longitude, self.end_latitude, self.end_longitude, ) prices = price_response.json.get("cost_estimates", []) for price in prices: product = self.products[price["ride_type"]] if price.get("is_valid_estimate"): product["estimate"] = price eta_response = client.get_pickup_time_estimates( self.start_latitude, self.start_longitude ) etas = eta_response.json.get("eta_estimates") for eta in etas: if eta.get("is_valid_estimate"): self.products[eta["ride_type"]]["eta"] = eta
apache-2.0
kenshay/ImageScripter
ProgramData/SystemFiles/Python/Lib/site-packages/OpenGL/GL/ARB/instanced_arrays.py
9
2044
'''OpenGL extension ARB.instanced_arrays This module customises the behaviour of the OpenGL.raw.GL.ARB.instanced_arrays to provide a more Python-friendly API Overview (from the spec) A common use case in GL for some applications is to be able to draw the same object, or groups of similar objects that share vertex data, primitive count and type, multiple times. This extension provides a means of accelerating such use cases while restricting the number of API calls, and keeping the amount of duplicate data to a minimum. In particular, this extension specifies an alternative to the read-only shader variable introduced by ARB_draw_instanced. It uses the same draw calls introduced by that extension, but redefines them so that a vertex shader can instead use vertex array attributes as a source of instance data. This extension introduces an array "divisor" for generic vertex array attributes, which when non-zero specifies that the attribute is "instanced." An instanced attribute does not advance per-vertex as usual, but rather after every <divisor> conceptual draw calls. (Attributes which aren't instanced are repeated in their entirety for every conceptual draw call.) By specifying transform data in an instanced attribute or series of instanced attributes, vertex shaders can, in concert with the instancing draw calls, draw multiple instances of an object with one draw call. The official definition of this extension is available here: http://www.opengl.org/registry/specs/ARB/instanced_arrays.txt ''' from OpenGL import platform, constant, arrays from OpenGL import extensions, wrapper import ctypes from OpenGL.raw.GL import _types, _glgets from OpenGL.raw.GL.ARB.instanced_arrays import * from OpenGL.raw.GL.ARB.instanced_arrays import _EXTENSION_NAME def glInitInstancedArraysARB(): '''Return boolean indicating whether this extension is available''' from OpenGL import extensions return extensions.hasGLExtension( _EXTENSION_NAME ) ### END AUTOGENERATED SECTION
gpl-3.0
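The extension overview above explains per-attribute divisors for instanced rendering. The following PyOpenGL sketch shows the typical call pattern; it assumes a current GL context, a bound VAO/VBO holding one vec3 offset per instance, and placeholder values for the attribute location and counts.

from OpenGL.GL import (glEnableVertexAttribArray, glVertexAttribPointer,
                       GL_FLOAT, GL_FALSE, GL_TRIANGLES)
from OpenGL.GL.ARB.instanced_arrays import glVertexAttribDivisorARB
from OpenGL.GL.ARB.draw_instanced import glDrawArraysInstancedARB

offset_loc = 1        # placeholder: location of the per-instance offset attribute
vertex_count = 36     # placeholder: vertices per instance
instance_count = 100  # placeholder: number of instances to draw

glEnableVertexAttribArray(offset_loc)
glVertexAttribPointer(offset_loc, 3, GL_FLOAT, GL_FALSE, 0, None)
# Divisor 1: the attribute advances once per instance instead of once per vertex.
glVertexAttribDivisorARB(offset_loc, 1)
glDrawArraysInstancedARB(GL_TRIANGLES, 0, vertex_count, instance_count)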
lasersonlab/pepsyn
setup.py
1
1298
# Copyright 2016 Uri Laserson # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import versioneer from setuptools import find_packages, setup def readme(): with open("README.md", "r") as ip: return ip.read() setup( name="pepsyn", version=versioneer.get_version(), cmdclass=versioneer.get_cmdclass(), description="Peptide library design", long_description=readme(), long_description_content_type="text/markdown", url="https://github.com/lasersonlab/pepsyn", author="Laserson Lab", license="Apache License, Version 2.0", classifiers=["Programming Language :: Python :: 3"], packages=find_packages(), install_requires=["click", "tqdm", "biopython", "pyyaml", "pygtrie"], entry_points={"console_scripts": ["pepsyn = pepsyn.cli:cli"]}, )
apache-2.0
tudorvio/tempest
tempest/api/identity/admin/v2/test_endpoints.py
10
4128
# Copyright 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from tempest.api.identity import base from tempest.common.utils import data_utils from tempest import test class EndPointsTestJSON(base.BaseIdentityV2AdminTest): @classmethod def resource_setup(cls): super(EndPointsTestJSON, cls).resource_setup() cls.service_ids = list() s_name = data_utils.rand_name('service') s_type = data_utils.rand_name('type') s_description = data_utils.rand_name('description') cls.service_data =\ cls.client.create_service(s_name, s_type, description=s_description) cls.service_id = cls.service_data['id'] cls.service_ids.append(cls.service_id) # Create endpoints so as to use for LIST and GET test cases cls.setup_endpoints = list() for i in range(2): region = data_utils.rand_name('region') url = data_utils.rand_url() endpoint = cls.client.create_endpoint(cls.service_id, region, publicurl=url, adminurl=url, internalurl=url) # list_endpoints() will return 'enabled' field endpoint['enabled'] = True cls.setup_endpoints.append(endpoint) @classmethod def resource_cleanup(cls): for e in cls.setup_endpoints: cls.client.delete_endpoint(e['id']) for s in cls.service_ids: cls.client.delete_service(s) super(EndPointsTestJSON, cls).resource_cleanup() @test.idempotent_id('11f590eb-59d8-4067-8b2b-980c7f387f51') def test_list_endpoints(self): # Get a list of endpoints fetched_endpoints = self.client.list_endpoints() # Asserting LIST endpoints missing_endpoints =\ [e for e in self.setup_endpoints if e not in fetched_endpoints] self.assertEqual(0, len(missing_endpoints), "Failed to find endpoint %s in fetched list" % ', '.join(str(e) for e in missing_endpoints)) @test.idempotent_id('9974530a-aa28-4362-8403-f06db02b26c1') def test_create_list_delete_endpoint(self): region = data_utils.rand_name('region') url = data_utils.rand_url() endpoint = self.client.create_endpoint(self.service_id, region, publicurl=url, adminurl=url, internalurl=url) # Asserting Create Endpoint response body self.assertIn('id', endpoint) self.assertEqual(region, endpoint['region']) self.assertEqual(url, endpoint['publicurl']) # Checking if created endpoint is present in the list of endpoints fetched_endpoints = self.client.list_endpoints() fetched_endpoints_id = [e['id'] for e in fetched_endpoints] self.assertIn(endpoint['id'], fetched_endpoints_id) # Deleting the endpoint created in this method self.client.delete_endpoint(endpoint['id']) # Checking whether endpoint is deleted successfully fetched_endpoints = self.client.list_endpoints() fetched_endpoints_id = [e['id'] for e in fetched_endpoints] self.assertNotIn(endpoint['id'], fetched_endpoints_id)
apache-2.0
xindus40223115/w16b_test
static/Brython3.1.1-20150328-091302/Lib/optparse.py
728
60616
"""A powerful, extensible, and easy-to-use option parser. By Greg Ward <[email protected]> Originally distributed as Optik. For support, use the [email protected] mailing list (http://lists.sourceforge.net/lists/listinfo/optik-users). Simple usage example: from optparse import OptionParser parser = OptionParser() parser.add_option("-f", "--file", dest="filename", help="write report to FILE", metavar="FILE") parser.add_option("-q", "--quiet", action="store_false", dest="verbose", default=True, help="don't print status messages to stdout") (options, args) = parser.parse_args() """ __version__ = "1.5.3" __all__ = ['Option', 'make_option', 'SUPPRESS_HELP', 'SUPPRESS_USAGE', 'Values', 'OptionContainer', 'OptionGroup', 'OptionParser', 'HelpFormatter', 'IndentedHelpFormatter', 'TitledHelpFormatter', 'OptParseError', 'OptionError', 'OptionConflictError', 'OptionValueError', 'BadOptionError'] __copyright__ = """ Copyright (c) 2001-2006 Gregory P. Ward. All rights reserved. Copyright (c) 2002-2006 Python Software Foundation. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the author nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """ import sys, os import textwrap def _repr(self): return "<%s at 0x%x: %s>" % (self.__class__.__name__, id(self), self) # This file was generated from: # Id: option_parser.py 527 2006-07-23 15:21:30Z greg # Id: option.py 522 2006-06-11 16:22:03Z gward # Id: help.py 527 2006-07-23 15:21:30Z greg # Id: errors.py 509 2006-04-20 00:58:24Z gward try: from gettext import gettext, ngettext except ImportError: def gettext(message): return message def ngettext(singular, plural, n): if n == 1: return singular return plural _ = gettext class OptParseError (Exception): def __init__(self, msg): self.msg = msg def __str__(self): return self.msg class OptionError (OptParseError): """ Raised if an Option instance is created with invalid or inconsistent arguments. """ def __init__(self, msg, option): self.msg = msg self.option_id = str(option) def __str__(self): if self.option_id: return "option %s: %s" % (self.option_id, self.msg) else: return self.msg class OptionConflictError (OptionError): """ Raised if conflicting options are added to an OptionParser. 
""" class OptionValueError (OptParseError): """ Raised if an invalid option value is encountered on the command line. """ class BadOptionError (OptParseError): """ Raised if an invalid option is seen on the command line. """ def __init__(self, opt_str): self.opt_str = opt_str def __str__(self): return _("no such option: %s") % self.opt_str class AmbiguousOptionError (BadOptionError): """ Raised if an ambiguous option is seen on the command line. """ def __init__(self, opt_str, possibilities): BadOptionError.__init__(self, opt_str) self.possibilities = possibilities def __str__(self): return (_("ambiguous option: %s (%s?)") % (self.opt_str, ", ".join(self.possibilities))) class HelpFormatter: """ Abstract base class for formatting option help. OptionParser instances should use one of the HelpFormatter subclasses for formatting help; by default IndentedHelpFormatter is used. Instance attributes: parser : OptionParser the controlling OptionParser instance indent_increment : int the number of columns to indent per nesting level max_help_position : int the maximum starting column for option help text help_position : int the calculated starting column for option help text; initially the same as the maximum width : int total number of columns for output (pass None to constructor for this value to be taken from the $COLUMNS environment variable) level : int current indentation level current_indent : int current indentation level (in columns) help_width : int number of columns available for option help text (calculated) default_tag : str text to replace with each option's default value, "%default" by default. Set to false value to disable default value expansion. option_strings : { Option : str } maps Option instances to the snippet of help text explaining the syntax of that option, e.g. "-h, --help" or "-fFILE, --file=FILE" _short_opt_fmt : str format string controlling how short options with values are printed in help text. Must be either "%s%s" ("-fFILE") or "%s %s" ("-f FILE"), because those are the two syntaxes that Optik supports. _long_opt_fmt : str similar but for long options; must be either "%s %s" ("--file FILE") or "%s=%s" ("--file=FILE"). """ NO_DEFAULT_VALUE = "none" def __init__(self, indent_increment, max_help_position, width, short_first): self.parser = None self.indent_increment = indent_increment self.help_position = self.max_help_position = max_help_position if width is None: try: width = int(os.environ['COLUMNS']) except (KeyError, ValueError): width = 80 width -= 2 self.width = width self.current_indent = 0 self.level = 0 self.help_width = None # computed later self.short_first = short_first self.default_tag = "%default" self.option_strings = {} self._short_opt_fmt = "%s %s" self._long_opt_fmt = "%s=%s" def set_parser(self, parser): self.parser = parser def set_short_opt_delimiter(self, delim): if delim not in ("", " "): raise ValueError( "invalid metavar delimiter for short options: %r" % delim) self._short_opt_fmt = "%s" + delim + "%s" def set_long_opt_delimiter(self, delim): if delim not in ("=", " "): raise ValueError( "invalid metavar delimiter for long options: %r" % delim) self._long_opt_fmt = "%s" + delim + "%s" def indent(self): self.current_indent += self.indent_increment self.level += 1 def dedent(self): self.current_indent -= self.indent_increment assert self.current_indent >= 0, "Indent decreased below 0." 
self.level -= 1 def format_usage(self, usage): raise NotImplementedError("subclasses must implement") def format_heading(self, heading): raise NotImplementedError("subclasses must implement") def _format_text(self, text): """ Format a paragraph of free-form text for inclusion in the help output at the current indentation level. """ text_width = self.width - self.current_indent indent = " "*self.current_indent return textwrap.fill(text, text_width, initial_indent=indent, subsequent_indent=indent) def format_description(self, description): if description: return self._format_text(description) + "\n" else: return "" def format_epilog(self, epilog): if epilog: return "\n" + self._format_text(epilog) + "\n" else: return "" def expand_default(self, option): if self.parser is None or not self.default_tag: return option.help default_value = self.parser.defaults.get(option.dest) if default_value is NO_DEFAULT or default_value is None: default_value = self.NO_DEFAULT_VALUE return option.help.replace(self.default_tag, str(default_value)) def format_option(self, option): # The help for each option consists of two parts: # * the opt strings and metavars # eg. ("-x", or "-fFILENAME, --file=FILENAME") # * the user-supplied help string # eg. ("turn on expert mode", "read data from FILENAME") # # If possible, we write both of these on the same line: # -x turn on expert mode # # But if the opt string list is too long, we put the help # string on a second line, indented to the same column it would # start in if it fit on the first line. # -fFILENAME, --file=FILENAME # read data from FILENAME result = [] opts = self.option_strings[option] opt_width = self.help_position - self.current_indent - 2 if len(opts) > opt_width: opts = "%*s%s\n" % (self.current_indent, "", opts) indent_first = self.help_position else: # start help on same line as opts opts = "%*s%-*s " % (self.current_indent, "", opt_width, opts) indent_first = 0 result.append(opts) if option.help: help_text = self.expand_default(option) help_lines = textwrap.wrap(help_text, self.help_width) result.append("%*s%s\n" % (indent_first, "", help_lines[0])) result.extend(["%*s%s\n" % (self.help_position, "", line) for line in help_lines[1:]]) elif opts[-1] != "\n": result.append("\n") return "".join(result) def store_option_strings(self, parser): self.indent() max_len = 0 for opt in parser.option_list: strings = self.format_option_strings(opt) self.option_strings[opt] = strings max_len = max(max_len, len(strings) + self.current_indent) self.indent() for group in parser.option_groups: for opt in group.option_list: strings = self.format_option_strings(opt) self.option_strings[opt] = strings max_len = max(max_len, len(strings) + self.current_indent) self.dedent() self.dedent() self.help_position = min(max_len + 2, self.max_help_position) self.help_width = self.width - self.help_position def format_option_strings(self, option): """Return a comma-separated list of option strings & metavariables.""" if option.takes_value(): metavar = option.metavar or option.dest.upper() short_opts = [self._short_opt_fmt % (sopt, metavar) for sopt in option._short_opts] long_opts = [self._long_opt_fmt % (lopt, metavar) for lopt in option._long_opts] else: short_opts = option._short_opts long_opts = option._long_opts if self.short_first: opts = short_opts + long_opts else: opts = long_opts + short_opts return ", ".join(opts) class IndentedHelpFormatter (HelpFormatter): """Format help with indented section bodies. 
""" def __init__(self, indent_increment=2, max_help_position=24, width=None, short_first=1): HelpFormatter.__init__( self, indent_increment, max_help_position, width, short_first) def format_usage(self, usage): return _("Usage: %s\n") % usage def format_heading(self, heading): return "%*s%s:\n" % (self.current_indent, "", heading) class TitledHelpFormatter (HelpFormatter): """Format help with underlined section headers. """ def __init__(self, indent_increment=0, max_help_position=24, width=None, short_first=0): HelpFormatter.__init__ ( self, indent_increment, max_help_position, width, short_first) def format_usage(self, usage): return "%s %s\n" % (self.format_heading(_("Usage")), usage) def format_heading(self, heading): return "%s\n%s\n" % (heading, "=-"[self.level] * len(heading)) def _parse_num(val, type): if val[:2].lower() == "0x": # hexadecimal radix = 16 elif val[:2].lower() == "0b": # binary radix = 2 val = val[2:] or "0" # have to remove "0b" prefix elif val[:1] == "0": # octal radix = 8 else: # decimal radix = 10 return type(val, radix) def _parse_int(val): return _parse_num(val, int) _builtin_cvt = { "int" : (_parse_int, _("integer")), "long" : (_parse_int, _("integer")), "float" : (float, _("floating-point")), "complex" : (complex, _("complex")) } def check_builtin(option, opt, value): (cvt, what) = _builtin_cvt[option.type] try: return cvt(value) except ValueError: raise OptionValueError( _("option %s: invalid %s value: %r") % (opt, what, value)) def check_choice(option, opt, value): if value in option.choices: return value else: choices = ", ".join(map(repr, option.choices)) raise OptionValueError( _("option %s: invalid choice: %r (choose from %s)") % (opt, value, choices)) # Not supplying a default is different from a default of None, # so we need an explicit "not supplied" value. NO_DEFAULT = ("NO", "DEFAULT") class Option: """ Instance attributes: _short_opts : [string] _long_opts : [string] action : string type : string dest : string default : any nargs : int const : any choices : [string] callback : function callback_args : (any*) callback_kwargs : { string : any } help : string metavar : string """ # The list of instance attributes that may be set through # keyword args to the constructor. ATTRS = ['action', 'type', 'dest', 'default', 'nargs', 'const', 'choices', 'callback', 'callback_args', 'callback_kwargs', 'help', 'metavar'] # The set of actions allowed by option parsers. Explicitly listed # here so the constructor can validate its arguments. ACTIONS = ("store", "store_const", "store_true", "store_false", "append", "append_const", "count", "callback", "help", "version") # The set of actions that involve storing a value somewhere; # also listed just for constructor argument validation. (If # the action is one of these, there must be a destination.) STORE_ACTIONS = ("store", "store_const", "store_true", "store_false", "append", "append_const", "count") # The set of actions for which it makes sense to supply a value # type, ie. which may consume an argument from the command line. TYPED_ACTIONS = ("store", "append", "callback") # The set of actions which *require* a value type, ie. that # always consume an argument from the command line. ALWAYS_TYPED_ACTIONS = ("store", "append") # The set of actions which take a 'const' attribute. CONST_ACTIONS = ("store_const", "append_const") # The set of known types for option parsers. Again, listed here for # constructor argument validation. 
TYPES = ("string", "int", "long", "float", "complex", "choice") # Dictionary of argument checking functions, which convert and # validate option arguments according to the option type. # # Signature of checking functions is: # check(option : Option, opt : string, value : string) -> any # where # option is the Option instance calling the checker # opt is the actual option seen on the command-line # (eg. "-a", "--file") # value is the option argument seen on the command-line # # The return value should be in the appropriate Python type # for option.type -- eg. an integer if option.type == "int". # # If no checker is defined for a type, arguments will be # unchecked and remain strings. TYPE_CHECKER = { "int" : check_builtin, "long" : check_builtin, "float" : check_builtin, "complex": check_builtin, "choice" : check_choice, } # CHECK_METHODS is a list of unbound method objects; they are called # by the constructor, in order, after all attributes are # initialized. The list is created and filled in later, after all # the methods are actually defined. (I just put it here because I # like to define and document all class attributes in the same # place.) Subclasses that add another _check_*() method should # define their own CHECK_METHODS list that adds their check method # to those from this class. CHECK_METHODS = None # -- Constructor/initialization methods ---------------------------- def __init__(self, *opts, **attrs): # Set _short_opts, _long_opts attrs from 'opts' tuple. # Have to be set now, in case no option strings are supplied. self._short_opts = [] self._long_opts = [] opts = self._check_opt_strings(opts) self._set_opt_strings(opts) # Set all other attrs (action, type, etc.) from 'attrs' dict self._set_attrs(attrs) # Check all the attributes we just set. There are lots of # complicated interdependencies, but luckily they can be farmed # out to the _check_*() methods listed in CHECK_METHODS -- which # could be handy for subclasses! The one thing these all share # is that they raise OptionError if they discover a problem. for checker in self.CHECK_METHODS: checker(self) def _check_opt_strings(self, opts): # Filter out None because early versions of Optik had exactly # one short option and one long option, either of which # could be None. 
opts = [opt for opt in opts if opt] if not opts: raise TypeError("at least one option string must be supplied") return opts def _set_opt_strings(self, opts): for opt in opts: if len(opt) < 2: raise OptionError( "invalid option string %r: " "must be at least two characters long" % opt, self) elif len(opt) == 2: if not (opt[0] == "-" and opt[1] != "-"): raise OptionError( "invalid short option string %r: " "must be of the form -x, (x any non-dash char)" % opt, self) self._short_opts.append(opt) else: if not (opt[0:2] == "--" and opt[2] != "-"): raise OptionError( "invalid long option string %r: " "must start with --, followed by non-dash" % opt, self) self._long_opts.append(opt) def _set_attrs(self, attrs): for attr in self.ATTRS: if attr in attrs: setattr(self, attr, attrs[attr]) del attrs[attr] else: if attr == 'default': setattr(self, attr, NO_DEFAULT) else: setattr(self, attr, None) if attrs: attrs = sorted(attrs.keys()) raise OptionError( "invalid keyword arguments: %s" % ", ".join(attrs), self) # -- Constructor validation methods -------------------------------- def _check_action(self): if self.action is None: self.action = "store" elif self.action not in self.ACTIONS: raise OptionError("invalid action: %r" % self.action, self) def _check_type(self): if self.type is None: if self.action in self.ALWAYS_TYPED_ACTIONS: if self.choices is not None: # The "choices" attribute implies "choice" type. self.type = "choice" else: # No type given? "string" is the most sensible default. self.type = "string" else: # Allow type objects or builtin type conversion functions # (int, str, etc.) as an alternative to their names. (The # complicated check of builtins is only necessary for # Python 2.1 and earlier, and is short-circuited by the # first check on modern Pythons.) import builtins if ( isinstance(self.type, type) or (hasattr(self.type, "__name__") and getattr(builtins, self.type.__name__, None) is self.type) ): self.type = self.type.__name__ if self.type == "str": self.type = "string" if self.type not in self.TYPES: raise OptionError("invalid option type: %r" % self.type, self) if self.action not in self.TYPED_ACTIONS: raise OptionError( "must not supply a type for action %r" % self.action, self) def _check_choice(self): if self.type == "choice": if self.choices is None: raise OptionError( "must supply a list of choices for type 'choice'", self) elif not isinstance(self.choices, (tuple, list)): raise OptionError( "choices must be a list of strings ('%s' supplied)" % str(type(self.choices)).split("'")[1], self) elif self.choices is not None: raise OptionError( "must not supply choices for type %r" % self.type, self) def _check_dest(self): # No destination given, and we need one for this action. The # self.type check is for callbacks that take a value. takes_value = (self.action in self.STORE_ACTIONS or self.type is not None) if self.dest is None and takes_value: # Glean a destination from the first long option string, # or from the first short option string if no long options. if self._long_opts: # eg. 
"--foo-bar" -> "foo_bar" self.dest = self._long_opts[0][2:].replace('-', '_') else: self.dest = self._short_opts[0][1] def _check_const(self): if self.action not in self.CONST_ACTIONS and self.const is not None: raise OptionError( "'const' must not be supplied for action %r" % self.action, self) def _check_nargs(self): if self.action in self.TYPED_ACTIONS: if self.nargs is None: self.nargs = 1 elif self.nargs is not None: raise OptionError( "'nargs' must not be supplied for action %r" % self.action, self) def _check_callback(self): if self.action == "callback": if not callable(self.callback): raise OptionError( "callback not callable: %r" % self.callback, self) if (self.callback_args is not None and not isinstance(self.callback_args, tuple)): raise OptionError( "callback_args, if supplied, must be a tuple: not %r" % self.callback_args, self) if (self.callback_kwargs is not None and not isinstance(self.callback_kwargs, dict)): raise OptionError( "callback_kwargs, if supplied, must be a dict: not %r" % self.callback_kwargs, self) else: if self.callback is not None: raise OptionError( "callback supplied (%r) for non-callback option" % self.callback, self) if self.callback_args is not None: raise OptionError( "callback_args supplied for non-callback option", self) if self.callback_kwargs is not None: raise OptionError( "callback_kwargs supplied for non-callback option", self) CHECK_METHODS = [_check_action, _check_type, _check_choice, _check_dest, _check_const, _check_nargs, _check_callback] # -- Miscellaneous methods ----------------------------------------- def __str__(self): return "/".join(self._short_opts + self._long_opts) __repr__ = _repr def takes_value(self): return self.type is not None def get_opt_string(self): if self._long_opts: return self._long_opts[0] else: return self._short_opts[0] # -- Processing methods -------------------------------------------- def check_value(self, opt, value): checker = self.TYPE_CHECKER.get(self.type) if checker is None: return value else: return checker(self, opt, value) def convert_value(self, opt, value): if value is not None: if self.nargs == 1: return self.check_value(opt, value) else: return tuple([self.check_value(opt, v) for v in value]) def process(self, opt, value, values, parser): # First, convert the value(s) to the right type. Howl if any # value(s) are bogus. value = self.convert_value(opt, value) # And then take whatever action is expected of us. # This is a separate method to make life easier for # subclasses to add new actions. 
return self.take_action( self.action, self.dest, opt, value, values, parser) def take_action(self, action, dest, opt, value, values, parser): if action == "store": setattr(values, dest, value) elif action == "store_const": setattr(values, dest, self.const) elif action == "store_true": setattr(values, dest, True) elif action == "store_false": setattr(values, dest, False) elif action == "append": values.ensure_value(dest, []).append(value) elif action == "append_const": values.ensure_value(dest, []).append(self.const) elif action == "count": setattr(values, dest, values.ensure_value(dest, 0) + 1) elif action == "callback": args = self.callback_args or () kwargs = self.callback_kwargs or {} self.callback(self, opt, value, parser, *args, **kwargs) elif action == "help": parser.print_help() parser.exit() elif action == "version": parser.print_version() parser.exit() else: raise ValueError("unknown action %r" % self.action) return 1 # class Option SUPPRESS_HELP = "SUPPRESS"+"HELP" SUPPRESS_USAGE = "SUPPRESS"+"USAGE" class Values: def __init__(self, defaults=None): if defaults: for (attr, val) in defaults.items(): setattr(self, attr, val) def __str__(self): return str(self.__dict__) __repr__ = _repr def __eq__(self, other): if isinstance(other, Values): return self.__dict__ == other.__dict__ elif isinstance(other, dict): return self.__dict__ == other else: return NotImplemented def _update_careful(self, dict): """ Update the option values from an arbitrary dictionary, but only use keys from dict that already have a corresponding attribute in self. Any keys in dict without a corresponding attribute are silently ignored. """ for attr in dir(self): if attr in dict: dval = dict[attr] if dval is not None: setattr(self, attr, dval) def _update_loose(self, dict): """ Update the option values from an arbitrary dictionary, using all keys from the dictionary regardless of whether they have a corresponding attribute in self or not. """ self.__dict__.update(dict) def _update(self, dict, mode): if mode == "careful": self._update_careful(dict) elif mode == "loose": self._update_loose(dict) else: raise ValueError("invalid update mode: %r" % mode) def read_module(self, modname, mode="careful"): __import__(modname) mod = sys.modules[modname] self._update(vars(mod), mode) def read_file(self, filename, mode="careful"): vars = {} exec(open(filename).read(), vars) self._update(vars, mode) def ensure_value(self, attr, value): if not hasattr(self, attr) or getattr(self, attr) is None: setattr(self, attr, value) return getattr(self, attr) class OptionContainer: """ Abstract base class. Class attributes: standard_option_list : [Option] list of standard options that will be accepted by all instances of this parser class (intended to be overridden by subclasses). Instance attributes: option_list : [Option] the list of Option objects contained by this OptionContainer _short_opt : { string : Option } dictionary mapping short option strings, eg. "-f" or "-X", to the Option instances that implement them. If an Option has multiple short option strings, it will appears in this dictionary multiple times. [1] _long_opt : { string : Option } dictionary mapping long option strings, eg. "--file" or "--exclude", to the Option instances that implement them. Again, a given Option can occur multiple times in this dictionary. 
[1] defaults : { string : any } dictionary mapping option destination names to default values for each destination [1] [1] These mappings are common to (shared by) all components of the controlling OptionParser, where they are initially created. """ def __init__(self, option_class, conflict_handler, description): # Initialize the option list and related data structures. # This method must be provided by subclasses, and it must # initialize at least the following instance attributes: # option_list, _short_opt, _long_opt, defaults. self._create_option_list() self.option_class = option_class self.set_conflict_handler(conflict_handler) self.set_description(description) def _create_option_mappings(self): # For use by OptionParser constructor -- create the master # option mappings used by this OptionParser and all # OptionGroups that it owns. self._short_opt = {} # single letter -> Option instance self._long_opt = {} # long option -> Option instance self.defaults = {} # maps option dest -> default value def _share_option_mappings(self, parser): # For use by OptionGroup constructor -- use shared option # mappings from the OptionParser that owns this OptionGroup. self._short_opt = parser._short_opt self._long_opt = parser._long_opt self.defaults = parser.defaults def set_conflict_handler(self, handler): if handler not in ("error", "resolve"): raise ValueError("invalid conflict_resolution value %r" % handler) self.conflict_handler = handler def set_description(self, description): self.description = description def get_description(self): return self.description def destroy(self): """see OptionParser.destroy().""" del self._short_opt del self._long_opt del self.defaults # -- Option-adding methods ----------------------------------------- def _check_conflict(self, option): conflict_opts = [] for opt in option._short_opts: if opt in self._short_opt: conflict_opts.append((opt, self._short_opt[opt])) for opt in option._long_opts: if opt in self._long_opt: conflict_opts.append((opt, self._long_opt[opt])) if conflict_opts: handler = self.conflict_handler if handler == "error": raise OptionConflictError( "conflicting option string(s): %s" % ", ".join([co[0] for co in conflict_opts]), option) elif handler == "resolve": for (opt, c_option) in conflict_opts: if opt.startswith("--"): c_option._long_opts.remove(opt) del self._long_opt[opt] else: c_option._short_opts.remove(opt) del self._short_opt[opt] if not (c_option._short_opts or c_option._long_opts): c_option.container.option_list.remove(c_option) def add_option(self, *args, **kwargs): """add_option(Option) add_option(opt_str, ..., kwarg=val, ...) 
""" if isinstance(args[0], str): option = self.option_class(*args, **kwargs) elif len(args) == 1 and not kwargs: option = args[0] if not isinstance(option, Option): raise TypeError("not an Option instance: %r" % option) else: raise TypeError("invalid arguments") self._check_conflict(option) self.option_list.append(option) option.container = self for opt in option._short_opts: self._short_opt[opt] = option for opt in option._long_opts: self._long_opt[opt] = option if option.dest is not None: # option has a dest, we need a default if option.default is not NO_DEFAULT: self.defaults[option.dest] = option.default elif option.dest not in self.defaults: self.defaults[option.dest] = None return option def add_options(self, option_list): for option in option_list: self.add_option(option) # -- Option query/removal methods ---------------------------------- def get_option(self, opt_str): return (self._short_opt.get(opt_str) or self._long_opt.get(opt_str)) def has_option(self, opt_str): return (opt_str in self._short_opt or opt_str in self._long_opt) def remove_option(self, opt_str): option = self._short_opt.get(opt_str) if option is None: option = self._long_opt.get(opt_str) if option is None: raise ValueError("no such option %r" % opt_str) for opt in option._short_opts: del self._short_opt[opt] for opt in option._long_opts: del self._long_opt[opt] option.container.option_list.remove(option) # -- Help-formatting methods --------------------------------------- def format_option_help(self, formatter): if not self.option_list: return "" result = [] for option in self.option_list: if not option.help is SUPPRESS_HELP: result.append(formatter.format_option(option)) return "".join(result) def format_description(self, formatter): return formatter.format_description(self.get_description()) def format_help(self, formatter): result = [] if self.description: result.append(self.format_description(formatter)) if self.option_list: result.append(self.format_option_help(formatter)) return "\n".join(result) class OptionGroup (OptionContainer): def __init__(self, parser, title, description=None): self.parser = parser OptionContainer.__init__( self, parser.option_class, parser.conflict_handler, description) self.title = title def _create_option_list(self): self.option_list = [] self._share_option_mappings(self.parser) def set_title(self, title): self.title = title def destroy(self): """see OptionParser.destroy().""" OptionContainer.destroy(self) del self.option_list # -- Help-formatting methods --------------------------------------- def format_help(self, formatter): result = formatter.format_heading(self.title) formatter.indent() result += OptionContainer.format_help(self, formatter) formatter.dedent() return result class OptionParser (OptionContainer): """ Class attributes: standard_option_list : [Option] list of standard options that will be accepted by all instances of this parser class (intended to be overridden by subclasses). Instance attributes: usage : string a usage string for your program. Before it is displayed to the user, "%prog" will be expanded to the name of your program (self.prog or os.path.basename(sys.argv[0])). prog : string the name of the current program (to override os.path.basename(sys.argv[0])). description : string A paragraph of text giving a brief overview of your program. optparse reformats this paragraph to fit the current terminal width and prints it when the user requests help (after usage, but before the list of options). 
epilog : string paragraph of help text to print after option help option_groups : [OptionGroup] list of option groups in this parser (option groups are irrelevant for parsing the command-line, but very useful for generating help) allow_interspersed_args : bool = true if true, positional arguments may be interspersed with options. Assuming -a and -b each take a single argument, the command-line -ablah foo bar -bboo baz will be interpreted the same as -ablah -bboo -- foo bar baz If this flag were false, that command line would be interpreted as -ablah -- foo bar -bboo baz -- ie. we stop processing options as soon as we see the first non-option argument. (This is the tradition followed by Python's getopt module, Perl's Getopt::Std, and other argument- parsing libraries, but it is generally annoying to users.) process_default_values : bool = true if true, option default values are processed similarly to option values from the command line: that is, they are passed to the type-checking function for the option's type (as long as the default value is a string). (This really only matters if you have defined custom types; see SF bug #955889.) Set it to false to restore the behaviour of Optik 1.4.1 and earlier. rargs : [string] the argument list currently being parsed. Only set when parse_args() is active, and continually trimmed down as we consume arguments. Mainly there for the benefit of callback options. largs : [string] the list of leftover arguments that we have skipped while parsing options. If allow_interspersed_args is false, this list is always empty. values : Values the set of option values currently being accumulated. Only set when parse_args() is active. Also mainly for callbacks. Because of the 'rargs', 'largs', and 'values' attributes, OptionParser is not thread-safe. If, for some perverse reason, you need to parse command-line arguments simultaneously in different threads, use different OptionParser instances. """ standard_option_list = [] def __init__(self, usage=None, option_list=None, option_class=Option, version=None, conflict_handler="error", description=None, formatter=None, add_help_option=True, prog=None, epilog=None): OptionContainer.__init__( self, option_class, conflict_handler, description) self.set_usage(usage) self.prog = prog self.version = version self.allow_interspersed_args = True self.process_default_values = True if formatter is None: formatter = IndentedHelpFormatter() self.formatter = formatter self.formatter.set_parser(self) self.epilog = epilog # Populate the option list; initial sources are the # standard_option_list class attribute, the 'option_list' # argument, and (if applicable) the _add_version_option() and # _add_help_option() methods. self._populate_option_list(option_list, add_help=add_help_option) self._init_parsing_state() def destroy(self): """ Declare that you are done with this OptionParser. This cleans up reference cycles so the OptionParser (and all objects referenced by it) can be garbage-collected promptly. After calling destroy(), the OptionParser is unusable. 
""" OptionContainer.destroy(self) for group in self.option_groups: group.destroy() del self.option_list del self.option_groups del self.formatter # -- Private methods ----------------------------------------------- # (used by our or OptionContainer's constructor) def _create_option_list(self): self.option_list = [] self.option_groups = [] self._create_option_mappings() def _add_help_option(self): self.add_option("-h", "--help", action="help", help=_("show this help message and exit")) def _add_version_option(self): self.add_option("--version", action="version", help=_("show program's version number and exit")) def _populate_option_list(self, option_list, add_help=True): if self.standard_option_list: self.add_options(self.standard_option_list) if option_list: self.add_options(option_list) if self.version: self._add_version_option() if add_help: self._add_help_option() def _init_parsing_state(self): # These are set in parse_args() for the convenience of callbacks. self.rargs = None self.largs = None self.values = None # -- Simple modifier methods --------------------------------------- def set_usage(self, usage): if usage is None: self.usage = _("%prog [options]") elif usage is SUPPRESS_USAGE: self.usage = None # For backwards compatibility with Optik 1.3 and earlier. elif usage.lower().startswith("usage: "): self.usage = usage[7:] else: self.usage = usage def enable_interspersed_args(self): """Set parsing to not stop on the first non-option, allowing interspersing switches with command arguments. This is the default behavior. See also disable_interspersed_args() and the class documentation description of the attribute allow_interspersed_args.""" self.allow_interspersed_args = True def disable_interspersed_args(self): """Set parsing to stop on the first non-option. Use this if you have a command processor which runs another command that has options of its own and you want to make sure these options don't get confused. """ self.allow_interspersed_args = False def set_process_default_values(self, process): self.process_default_values = process def set_default(self, dest, value): self.defaults[dest] = value def set_defaults(self, **kwargs): self.defaults.update(kwargs) def _get_all_options(self): options = self.option_list[:] for group in self.option_groups: options.extend(group.option_list) return options def get_default_values(self): if not self.process_default_values: # Old, pre-Optik 1.5 behaviour. 
return Values(self.defaults) defaults = self.defaults.copy() for option in self._get_all_options(): default = defaults.get(option.dest) if isinstance(default, str): opt_str = option.get_opt_string() defaults[option.dest] = option.check_value(opt_str, default) return Values(defaults) # -- OptionGroup methods ------------------------------------------- def add_option_group(self, *args, **kwargs): # XXX lots of overlap with OptionContainer.add_option() if isinstance(args[0], str): group = OptionGroup(self, *args, **kwargs) elif len(args) == 1 and not kwargs: group = args[0] if not isinstance(group, OptionGroup): raise TypeError("not an OptionGroup instance: %r" % group) if group.parser is not self: raise ValueError("invalid OptionGroup (wrong parser)") else: raise TypeError("invalid arguments") self.option_groups.append(group) return group def get_option_group(self, opt_str): option = (self._short_opt.get(opt_str) or self._long_opt.get(opt_str)) if option and option.container is not self: return option.container return None # -- Option-parsing methods ---------------------------------------- def _get_args(self, args): if args is None: return sys.argv[1:] else: return args[:] # don't modify caller's list def parse_args(self, args=None, values=None): """ parse_args(args : [string] = sys.argv[1:], values : Values = None) -> (values : Values, args : [string]) Parse the command-line options found in 'args' (default: sys.argv[1:]). Any errors result in a call to 'error()', which by default prints the usage message to stderr and calls sys.exit() with an error message. On success returns a pair (values, args) where 'values' is an Values instance (with all your option values) and 'args' is the list of arguments left over after parsing options. """ rargs = self._get_args(args) if values is None: values = self.get_default_values() # Store the halves of the argument list as attributes for the # convenience of callbacks: # rargs # the rest of the command-line (the "r" stands for # "remaining" or "right-hand") # largs # the leftover arguments -- ie. what's left after removing # options and their arguments (the "l" stands for "leftover" # or "left-hand") self.rargs = rargs self.largs = largs = [] self.values = values try: stop = self._process_args(largs, rargs, values) except (BadOptionError, OptionValueError) as err: self.error(str(err)) args = largs + rargs return self.check_values(values, args) def check_values(self, values, args): """ check_values(values : Values, args : [string]) -> (values : Values, args : [string]) Check that the supplied option values and leftover arguments are valid. Returns the option values and leftover arguments (possibly adjusted, possibly completely new -- whatever you like). Default implementation just returns the passed-in values; subclasses may override as desired. """ return (values, args) def _process_args(self, largs, rargs, values): """_process_args(largs : [string], rargs : [string], values : Values) Process command-line arguments and populate 'values', consuming options and arguments from 'rargs'. If 'allow_interspersed_args' is false, stop at the first non-option argument. If true, accumulate any interspersed non-option arguments in 'largs'. """ while rargs: arg = rargs[0] # We handle bare "--" explicitly, and bare "-" is handled by the # standard arg handler since the short arg case ensures that the # len of the opt string is greater than 1. 
if arg == "--": del rargs[0] return elif arg[0:2] == "--": # process a single long option (possibly with value(s)) self._process_long_opt(rargs, values) elif arg[:1] == "-" and len(arg) > 1: # process a cluster of short options (possibly with # value(s) for the last one only) self._process_short_opts(rargs, values) elif self.allow_interspersed_args: largs.append(arg) del rargs[0] else: return # stop now, leave this arg in rargs # Say this is the original argument list: # [arg0, arg1, ..., arg(i-1), arg(i), arg(i+1), ..., arg(N-1)] # ^ # (we are about to process arg(i)). # # Then rargs is [arg(i), ..., arg(N-1)] and largs is a *subset* of # [arg0, ..., arg(i-1)] (any options and their arguments will have # been removed from largs). # # The while loop will usually consume 1 or more arguments per pass. # If it consumes 1 (eg. arg is an option that takes no arguments), # then after _process_arg() is done the situation is: # # largs = subset of [arg0, ..., arg(i)] # rargs = [arg(i+1), ..., arg(N-1)] # # If allow_interspersed_args is false, largs will always be # *empty* -- still a subset of [arg0, ..., arg(i-1)], but # not a very interesting subset! def _match_long_opt(self, opt): """_match_long_opt(opt : string) -> string Determine which long option string 'opt' matches, ie. which one it is an unambiguous abbreviation for. Raises BadOptionError if 'opt' doesn't unambiguously match any long option string. """ return _match_abbrev(opt, self._long_opt) def _process_long_opt(self, rargs, values): arg = rargs.pop(0) # Value explicitly attached to arg? Pretend it's the next # argument. if "=" in arg: (opt, next_arg) = arg.split("=", 1) rargs.insert(0, next_arg) had_explicit_value = True else: opt = arg had_explicit_value = False opt = self._match_long_opt(opt) option = self._long_opt[opt] if option.takes_value(): nargs = option.nargs if len(rargs) < nargs: self.error(ngettext( "%(option)s option requires %(number)d argument", "%(option)s option requires %(number)d arguments", nargs) % {"option": opt, "number": nargs}) elif nargs == 1: value = rargs.pop(0) else: value = tuple(rargs[0:nargs]) del rargs[0:nargs] elif had_explicit_value: self.error(_("%s option does not take a value") % opt) else: value = None option.process(opt, value, values, self) def _process_short_opts(self, rargs, values): arg = rargs.pop(0) stop = False i = 1 for ch in arg[1:]: opt = "-" + ch option = self._short_opt.get(opt) i += 1 # we have consumed a character if not option: raise BadOptionError(opt) if option.takes_value(): # Any characters left in arg? Pretend they're the # next arg, and stop consuming characters of arg. 
if i < len(arg): rargs.insert(0, arg[i:]) stop = True nargs = option.nargs if len(rargs) < nargs: self.error(ngettext( "%(option)s option requires %(number)d argument", "%(option)s option requires %(number)d arguments", nargs) % {"option": opt, "number": nargs}) elif nargs == 1: value = rargs.pop(0) else: value = tuple(rargs[0:nargs]) del rargs[0:nargs] else: # option doesn't take a value value = None option.process(opt, value, values, self) if stop: break # -- Feedback methods ---------------------------------------------- def get_prog_name(self): if self.prog is None: return os.path.basename(sys.argv[0]) else: return self.prog def expand_prog_name(self, s): return s.replace("%prog", self.get_prog_name()) def get_description(self): return self.expand_prog_name(self.description) def exit(self, status=0, msg=None): if msg: sys.stderr.write(msg) sys.exit(status) def error(self, msg): """error(msg : string) Print a usage message incorporating 'msg' to stderr and exit. If you override this in a subclass, it should not return -- it should either exit or raise an exception. """ self.print_usage(sys.stderr) self.exit(2, "%s: error: %s\n" % (self.get_prog_name(), msg)) def get_usage(self): if self.usage: return self.formatter.format_usage( self.expand_prog_name(self.usage)) else: return "" def print_usage(self, file=None): """print_usage(file : file = stdout) Print the usage message for the current program (self.usage) to 'file' (default stdout). Any occurrence of the string "%prog" in self.usage is replaced with the name of the current program (basename of sys.argv[0]). Does nothing if self.usage is empty or not defined. """ if self.usage: print(self.get_usage(), file=file) def get_version(self): if self.version: return self.expand_prog_name(self.version) else: return "" def print_version(self, file=None): """print_version(file : file = stdout) Print the version message for this program (self.version) to 'file' (default stdout). As with print_usage(), any occurrence of "%prog" in self.version is replaced by the current program's name. Does nothing if self.version is empty or undefined. """ if self.version: print(self.get_version(), file=file) def format_option_help(self, formatter=None): if formatter is None: formatter = self.formatter formatter.store_option_strings(self) result = [] result.append(formatter.format_heading(_("Options"))) formatter.indent() if self.option_list: result.append(OptionContainer.format_option_help(self, formatter)) result.append("\n") for group in self.option_groups: result.append(group.format_help(formatter)) result.append("\n") formatter.dedent() # Drop the last "\n", or the header if no options or option groups: return "".join(result[:-1]) def format_epilog(self, formatter): return formatter.format_epilog(self.epilog) def format_help(self, formatter=None): if formatter is None: formatter = self.formatter result = [] if self.usage: result.append(self.get_usage() + "\n") if self.description: result.append(self.format_description(formatter) + "\n") result.append(self.format_option_help(formatter)) result.append(self.format_epilog(formatter)) return "".join(result) def print_help(self, file=None): """print_help(file : file = stdout) Print an extended help message, listing all options and any help text provided with them, to 'file' (default stdout). 
""" if file is None: file = sys.stdout file.write(self.format_help()) # class OptionParser def _match_abbrev(s, wordmap): """_match_abbrev(s : string, wordmap : {string : Option}) -> string Return the string key in 'wordmap' for which 's' is an unambiguous abbreviation. If 's' is found to be ambiguous or doesn't match any of 'words', raise BadOptionError. """ # Is there an exact match? if s in wordmap: return s else: # Isolate all words with s as a prefix. possibilities = [word for word in wordmap.keys() if word.startswith(s)] # No exact match, so there had better be just one possibility. if len(possibilities) == 1: return possibilities[0] elif not possibilities: raise BadOptionError(s) else: # More than one possible completion: ambiguous prefix. possibilities.sort() raise AmbiguousOptionError(s, possibilities) # Some day, there might be many Option classes. As of Optik 1.3, the # preferred way to instantiate Options is indirectly, via make_option(), # which will become a factory function when there are many Option # classes. make_option = Option
gpl-3.0
elbeardmorez/quodlibet
quodlibet/quodlibet/ext/events/mpdserver/__init__.py
1
6272
# -*- coding: utf-8 -*- # Copyright 2014 Christoph Reiter <[email protected]> # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. import os if os.name == "nt": from quodlibet.plugins import PluginNotSupportedError # we are missing socket.fromfd on Windows raise PluginNotSupportedError import socket from gi.repository import Gtk from quodlibet import _ from quodlibet.plugins import PluginConfigMixin from quodlibet.plugins.events import EventPlugin from quodlibet import app from quodlibet import qltk from quodlibet import config from quodlibet.qltk.entry import UndoEntry from quodlibet.qltk import Icons from quodlibet.util import print_w, print_d from quodlibet.util.thread import call_async, Cancellable from .main import MPDServer from .tcpserver import ServerError from .avahi import AvahiService, AvahiError def fetch_local_ip(): """Returns a guess for the local IP""" try: s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) s.connect(("8.8.8.8", 80)) addr = s.getsockname()[0] s.close() except EnvironmentError: addr = "?.?.?.?" return addr DEFAULT_PORT = 6600 def get_port_num(): return config.getint("plugins", "mpdserver_port", DEFAULT_PORT) def set_port_num(value): return config.set("plugins", "mpdserver_port", str(value)) class MPDServerPlugin(EventPlugin, PluginConfigMixin): PLUGIN_ID = "mpd_server" PLUGIN_NAME = _("MPD Server") PLUGIN_DESC = _("Allows remote control of Quod Libet using an MPD Client. " "Streaming, playlist and library management " "are not supported.") PLUGIN_ICON = Icons.NETWORK_WORKGROUP CONFIG_SECTION = "mpdserver" _server = None def PluginPreferences(self, parent): table = Gtk.Table(n_rows=3, n_columns=3) table.set_col_spacings(6) table.set_row_spacings(6) label = Gtk.Label(label=_("_Port:"), use_underline=True) label.set_alignment(0.0, 0.5) table.attach(label, 0, 1, 1, 2, xoptions=Gtk.AttachOptions.FILL | Gtk.AttachOptions.SHRINK) entry = UndoEntry() entry.set_text(str(get_port_num())) def validate_port(entry, text, *args): try: int(text) except ValueError: entry.stop_emission("insert-text") entry.connect("insert-text", validate_port) def port_activate(entry, *args): try: port_num = int(entry.get_text()) except ValueError as e: print_w(e) else: if get_port_num() != port_num: set_port_num(port_num) self._refresh() entry.connect_after("activate", port_activate) entry.connect_after("focus-out-event", port_activate) table.attach(entry, 1, 2, 1, 2) port_revert = Gtk.Button() port_revert.add(Gtk.Image.new_from_icon_name( Icons.DOCUMENT_REVERT, Gtk.IconSize.MENU)) def port_revert_cb(button, entry): entry.set_text(str(DEFAULT_PORT)) entry.emit("activate") port_revert.connect("clicked", port_revert_cb, entry) table.attach( port_revert, 2, 3, 1, 2, xoptions=Gtk.AttachOptions.SHRINK) label = Gtk.Label(label=_("Local _IP:"), use_underline=True) label.set_alignment(0.0, 0.5) table.attach(label, 0, 1, 0, 1, xoptions=Gtk.AttachOptions.FILL | Gtk.AttachOptions.SHRINK) label = Gtk.Label(label=_("P_assword:"), use_underline=True) label.set_alignment(0.0, 0.5) table.attach(label, 0, 1, 2, 3, xoptions=Gtk.AttachOptions.FILL | Gtk.AttachOptions.SHRINK) entry = UndoEntry() entry.set_text(self.config_get("password")) entry.connect('changed', self.config_entry_changed, "password") table.attach(entry, 1, 3, 2, 3) label = Gtk.Label() label.set_padding(6, 6) label.set_alignment(0.0, 0.5) 
label.set_selectable(True) label.set_label("...") table.attach(label, 1, 3, 0, 1) cancel = Cancellable() label.connect("destroy", lambda *x: cancel.cancel()) call_async(fetch_local_ip, cancel, label.set_label) box = Gtk.VBox(spacing=12) clients = Gtk.Label() clients.set_padding(6, 6) clients.set_markup(u"""\ \u2022 <a href="https://play.google.com/store/apps/details?id=com.\ namelessdev.mpdroid">MPDroid</a> (Android) \u2022 <a href="https://play.google.com/store/apps/details?id=org.\ gateshipone.malp">M.A.L.P.</a> (Android) """) clients.set_alignment(0, 0) box.pack_start( qltk.Frame(_("Connection"), child=table), False, True, 0) box.pack_start( qltk.Frame(_("Tested Clients"), child=clients), True, True, 0) return box def _refresh(self): # only restart if it was running if self._server: self._disable_server() self._enable_server() self._update_avahi() def _enable_server(self): port_num = get_port_num() print_d("Starting MPD server on port %d" % port_num) self._server = MPDServer(app, self, port_num) try: self._server.start() except ServerError as e: print_w(e) def _disable_server(self): print_d("Stopping MPD server") self._server.stop() self._server = None def _update_avahi(self): assert self._avahi port_num = get_port_num() try: self._avahi.register(app.name, port_num, "_mpd._tcp") except AvahiError as e: print_w(e) def enabled(self): self._enable_server() self._avahi = AvahiService() self._update_avahi() def disabled(self): self._avahi.unregister() self._avahi = None self._disable_server()
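# Illustrative connection sketch (the hostname is hypothetical): once the
# plugin is enabled it listens on the configured port (6600 by default), so a
# stock MPD client such as "mpc" should be able to query it, e.g.
#
#     mpc -h 192.168.1.10 -p 6600 status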
gpl-2.0
pierreberthet/local-scripts
dyndopa.py
1
3674
import numpy as np import json import matplotlib #matplotlib.use('Agg') import pylab as pl import pprint as pp import sys # Plot the different figures for the merged spikes and voltages recordings. # This file, as the MergeSpikefiles.py should be one level up than Test/..., the output of a simulation. path = sys.argv[1]+'/' fparam = path+'Test/Parameters/simulation_parameters.json' f = open(fparam, 'r') params = json.load(f) si = 13 parms = { 'axes.labelsize': si, 'text.fontsize': si, 'legend.fontsize': si, 'xtick.labelsize': si, 'ytick.labelsize': si, 'text.usetex': False #'figure.figsize': [6., 7.] } pl.rcParams.update(parms) #pp.pprint(params) recorder_type = 'spikes' cell = 'rew' outcome = np.loadtxt(path+params['rewards_multi_fn']+'_0') data = np.loadtxt(path+params['spiketimes_folder']+cell+'_merged_'+recorder_type+'.dat' ) color = ['b','g', 'r', 'c', 'm', 'y', 'k'] color = ['b','y', 'k', 'g', 'r', 'm', 'c'] z = 0 cl = color[z%len(color)] cl = 'k' size = 5. print 'SPIKES' ymax = 0. fig = pl.figure(123,(8,5)) ax = fig.add_subplot(211) spread = 2 x = len(outcome) * spread tempd =np.sort(data[:,1])[params['t_init']:] #distrib = np.histogram(tempd, x)[0] #distrib = np.histogram(data[:,1], x)[0] #distrib = distrib[0] / float(np.sum(distrib[0])) #distrib = distrib - np.mean(distrib) #adapt = np.max(distrib) #y=np.mean(distrib) #pl.title('dopamine dynamic') if len(sys.argv)==4: start = int(sys.argv[2]) end= int(sys.argv[3]) data[:,0]-=min(data[:,0]) gids = [] ttime = [] for i in xrange(len(data[:,0])): if data[i,1]>start and data[i,1]<end: gids.append(data[i,0]) ttime.append(data[i,1]) ax.hist(ttime, len(ttime)/100., facecolor='black') # ax.set_xlim([start,end]) else: ax.hist(tempd, len(tempd)/100., facecolor='black') ax.set_xlim([0,params['t_sim']]) ax.spines['top'].set_visible(False) ax.spines['right'].set_visible(False) ax.spines['left'].set_visible(False) ax.get_xaxis().tick_bottom() ax.get_yaxis().tick_left() ax.tick_params(axis='x', direction='out') ax.tick_params(axis='y', length=0) pl.xticks(pl.xticks()[0],[str(int(a/1000.)) for a in pl.xticks()[0]]) #pl.xticks([]) pl.yticks(pl.yticks()[0],[str(int(a/10.)) for a in pl.yticks()[0]]) #ax.grid(axis='y', color="0.9", linestyle='-', linewidth=1) ax.set_axisbelow(True) pl.ylabel('Firing rate [Hz]') bx = fig.add_subplot(212) print cell data = np.loadtxt(path+params['spiketimes_folder']+cell+'_merged_'+recorder_type+'.dat' ) if len(data)<2: print 'no data in ', cell else: if len(sys.argv)==4: bx.scatter(ttime, gids, c=cl, s=size,label=cell, marker="|") bx.set_xlim([start,end]) else: bx.scatter(data[:,1], data[:,0], c=cl, s=size,label=cell, marker="|") bx.set_xlim([0,params['t_sim']]) bx.set_ylim([0., max(gids)]) bx.spines['top'].set_visible(False) bx.spines['right'].set_visible(False) bx.spines['left'].set_visible(False) bx.get_xaxis().tick_bottom() bx.get_yaxis().tick_left() bx.tick_params(axis='x', direction='out') bx.tick_params(axis='y', length=0) #bx.grid(axis='y', color="0.9", linestyle='-', linewidth=1) bx.set_axisbelow(True) pl.xlabel('Time [s]') pl.ylabel('Dopaminergic neuron') pl.xticks(pl.xticks()[0],[str(int(a/1000.)) for a in pl.xticks()[0]]) pl.subplots_adjust(left = .04, bottom=.04, right=.97, top=.97) #figman = pl.get_current_fig_manager() #figman.frame.Maximize(True) if len(sys.argv)==4: pl.savefig('zoom.pdf', bbox_inches='tight', dpi=1000) pl.savefig('zoom.png', bbox_inches='tight', dpi=1000) pl.savefig('zoom.tiff', bbox_inches='tight', dpi=1000) pl.show()
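# Illustrative invocation sketch (the folder name is hypothetical), inferred
# from the sys.argv handling above: the first argument is a simulation output
# folder containing Test/Parameters/simulation_parameters.json; an optional
# start and end time (in the recording's time units) produce the zoomed
# figures saved as zoom.pdf / zoom.png / zoom.tiff.
#
#     python dyndopa.py some_run_folder
#     python dyndopa.py some_run_folder 2000 4000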
gpl-2.0
supersven/intellij-community
python/lib/Lib/site-packages/django/contrib/admin/__init__.py
246
1608
# ACTION_CHECKBOX_NAME is unused, but should stay since its import from here # has been referenced in documentation. from django.contrib.admin.helpers import ACTION_CHECKBOX_NAME from django.contrib.admin.options import ModelAdmin, HORIZONTAL, VERTICAL from django.contrib.admin.options import StackedInline, TabularInline from django.contrib.admin.sites import AdminSite, site def autodiscover(): """ Auto-discover INSTALLED_APPS admin.py modules and fail silently when not present. This forces an import on them to register any admin bits they may want. """ import copy from django.conf import settings from django.utils.importlib import import_module from django.utils.module_loading import module_has_submodule for app in settings.INSTALLED_APPS: mod = import_module(app) # Attempt to import the app's admin module. try: before_import_registry = copy.copy(site._registry) import_module('%s.admin' % app) except: # Reset the model registry to the state before the last import as # this import will have to reoccur on the next request and this # could raise NotRegistered and AlreadyRegistered exceptions # (see #8245). site._registry = before_import_registry # Decide whether to bubble up this error. If the app just # doesn't have an admin module, we can ignore the error # attempting to import it, otherwise we want it to bubble up. if module_has_submodule(mod, 'admin'): raise
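# Illustrative usage sketch (project file names are hypothetical):
# autodiscover() is normally called once from the project's URLconf so that
# every installed app's admin.py is imported and its ModelAdmin classes
# register themselves with `site`, e.g.
#
#     # urls.py
#     from django.conf.urls.defaults import patterns, include
#     from django.contrib import admin
#     admin.autodiscover()
#     urlpatterns = patterns('',
#         (r'^admin/', include(admin.site.urls)),
#     )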
apache-2.0
stefanvanwouw/puppet-spark
files/spark/python/examples/kmeans.py
10
2247
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

"""
This example requires numpy (http://www.numpy.org/)
"""
import sys

import numpy as np
from pyspark import SparkContext


def parseVector(line):
    return np.array([float(x) for x in line.split(' ')])


def closestPoint(p, centers):
    bestIndex = 0
    closest = float("+inf")
    for i in range(len(centers)):
        tempDist = np.sum((p - centers[i]) ** 2)
        if tempDist < closest:
            closest = tempDist
            bestIndex = i
    return bestIndex


if __name__ == "__main__":
    if len(sys.argv) < 5:
        print >> sys.stderr, "Usage: kmeans <master> <file> <k> <convergeDist>"
        exit(-1)
    sc = SparkContext(sys.argv[1], "PythonKMeans")
    lines = sc.textFile(sys.argv[2])
    data = lines.map(parseVector).cache()
    K = int(sys.argv[3])
    convergeDist = float(sys.argv[4])

    # TODO: change this after we port takeSample()
    #kPoints = data.takeSample(False, K, 34)
    kPoints = data.take(K)
    tempDist = 1.0

    while tempDist > convergeDist:
        closest = data.map(
            lambda p : (closestPoint(p, kPoints), (p, 1)))
        pointStats = closest.reduceByKey(
            lambda (x1, y1), (x2, y2): (x1 + x2, y1 + y2))
        newPoints = pointStats.map(
            lambda (x, (y, z)): (x, y / z)).collect()

        tempDist = sum(np.sum((kPoints[x] - y) ** 2) for (x, y) in newPoints)

        for (x, y) in newPoints:
            kPoints[x] = y

    print "Final centers: " + str(kPoints)
mit
HeadCow/ARPS
report_spider/report_spider/spiders/THU004.py
3
2439
# -*- coding:utf-8 -*-
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
import time
import scrapy
from Global_function import get_localtime, print_new_number, save_messages

now_time = get_localtime(time.strftime("%Y-%m-%d", time.localtime()))
# now_time = 20170401

class THU004_Spider(scrapy.Spider):
    name = 'THU004'
    start_urls = ['http://www.chemeng.tsinghua.edu.cn/podcast.do?method=news&cid=34']
    domain = 'http://www.chemeng.tsinghua.edu.cn/'
    counts = 0

    def parse(self, response):
        messages = response.xpath("//div[@class='employlist']/ul/li")
        for i in xrange(len(messages)):
            report_url = self.domain + messages[i].xpath(".//a/@href").extract()[0]
            report_time = get_localtime(messages[i].xpath(".//cite/text()").extract()[0].strip().strip('[]'))
            yield scrapy.Request(report_url, callback=self.parse_pages, meta={'link': report_url, 'number': i + 1})

    def parse_pages(self, response):
        messages = response.xpath("//td[@height='400']/p")
        title = response.xpath("//h4/text()").extract()[0].strip()
        time, address, speaker, img_url = '', '', '', ''
        for message in messages:
            text = self.get_messages(message)
            if u'时间:' in text or u'时间:' in text:
                time = self.connect_messages(text, ':') if u'时间:' in text else self.connect_messages(text, ':')
            if u'地点:' in text or u'地点:' in text:
                address = self.connect_messages(text, ':') if u'地点:' in text else self.connect_messages(text, ':')
            if u'报告人:' in text or u'报告人:' in text:
                speaker = self.connect_messages(text, ':') if u'报告人:' in text else self.connect_messages(text, ':')
            img = message.xpath(".//img/@src")
            img_url = (self.domain + img.extract()[0][1:]) if len(img) > 0 else ''
        if title != '':
            self.counts += 1
            print_new_number(self.counts, 'THU', self.name)
            all_messages = save_messages('THU', self.name, title, time, address, speaker, '', '', img_url,
                                         response.meta['link'], response.meta['number'], u'清华大学', u'化学工程系')
            return all_messages

    def get_messages(self, messages):
        text = ''
        message = messages.xpath(".//text()").extract()
        for each in message:
            text += each.strip()
        return text

    def connect_messages(self, messages, sign):
        message = messages.split(sign)[1:]
        text = ''
        for i in xrange(len(message)):
            if i > 0:
                text += ':'
            text += message[i].strip()
        return text
mit
darkleons/BE
addons/account_voucher/report/account_voucher_sales_receipt.py
326
5808
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

from openerp.osv import fields, osv
from openerp import tools


class sale_receipt_report(osv.osv):
    _name = "sale.receipt.report"
    _description = "Sales Receipt Statistics"
    _auto = False
    _rec_name = 'date'
    _columns = {
        'date': fields.date('Date', readonly=True),
        'currency_id': fields.many2one('res.currency', 'Currency', readonly=True),
        'journal_id': fields.many2one('account.journal', 'Journal', readonly=True),
        'partner_id': fields.many2one('res.partner', 'Partner', readonly=True),
        'company_id': fields.many2one('res.company', 'Company', readonly=True),
        'user_id': fields.many2one('res.users', 'Salesperson', readonly=True),
        'price_total': fields.float('Total Without Tax', readonly=True),
        'price_total_tax': fields.float('Total With Tax', readonly=True),
        'nbr':fields.integer('# of Voucher Lines', readonly=True),
        'type': fields.selection([
            ('sale','Sale'),
            ('purchase','Purchase'),
            ('payment','Payment'),
            ('receipt','Receipt'),
            ],'Type', readonly=True),
        'state': fields.selection([
            ('draft','Draft'),
            ('proforma','Pro-forma'),
            ('posted','Posted'),
            ('cancel','Cancelled')
            ], 'Voucher Status', readonly=True),
        'pay_now':fields.selection([
            ('pay_now','Pay Directly'),
            ('pay_later','Pay Later or Group Funds'),
            ],'Payment', readonly=True),
        'date_due': fields.date('Due Date', readonly=True),
        'account_id': fields.many2one('account.account', 'Account',readonly=True),
        'delay_to_pay': fields.float('Avg. Delay To Pay', readonly=True, group_operator="avg"),
        'due_delay': fields.float('Avg. Due Delay', readonly=True, group_operator="avg")
    }
    _order = 'date desc'

    def init(self, cr):
        tools.drop_view_if_exists(cr, 'sale_receipt_report')
        cr.execute("""
            create or replace view sale_receipt_report as (
                select min(avl.id) as id,
                    av.date as date,
                    av.partner_id as partner_id,
                    aj.currency as currency_id,
                    av.journal_id as journal_id,
                    rp.user_id as user_id,
                    av.company_id as company_id,
                    count(avl.*) as nbr,
                    av.type as type,
                    av.state,
                    av.pay_now,
                    av.date_due as date_due,
                    av.account_id as account_id,
                    sum(av.amount-av.tax_amount)/(select count(l.id) from account_voucher_line as l
                        left join account_voucher as a ON (a.id=l.voucher_id)
                        where a.id=av.id) as price_total,
                    sum(av.amount)/(select count(l.id) from account_voucher_line as l
                        left join account_voucher as a ON (a.id=l.voucher_id)
                        where a.id=av.id) as price_total_tax,
                    sum((select extract(epoch from avg(date_trunc('day',aml.date_created)-date_trunc('day',l.create_date)))/(24*60*60)::decimal(16,2)
                        from account_move_line as aml
                        left join account_voucher as a ON (a.move_id=aml.move_id)
                        left join account_voucher_line as l ON (a.id=l.voucher_id)
                        where a.id=av.id)) as delay_to_pay,
                    sum((select extract(epoch from avg(date_trunc('day',a.date_due)-date_trunc('day',a.date)))/(24*60*60)::decimal(16,2)
                        from account_move_line as aml
                        left join account_voucher as a ON (a.move_id=aml.move_id)
                        left join account_voucher_line as l ON (a.id=l.voucher_id)
                        where a.id=av.id)) as due_delay
                from account_voucher_line as avl
                left join account_voucher as av on (av.id=avl.voucher_id)
                left join res_partner as rp ON (rp.id=av.partner_id)
                left join account_journal as aj ON (aj.id=av.journal_id)
                where av.type='sale' and aj.type in ('sale','sale_refund')
                group by av.date, av.id, av.partner_id, aj.currency, av.journal_id,
                    rp.user_id, av.company_id, av.type, av.state,
                    av.date_due, av.account_id, av.tax_amount, av.amount, av.tax_amount, av.pay_now
            )
        """)

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
Zanzibar82/streamondemand.test
core/update_servers.py
1
2438
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# update_servers.py
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
# ------------------------------------------------------------

import os
import re
from threading import Thread

from core import config
from core import scrapertools

DEBUG = config.get_setting("debug")

UPDATE_URL_IDX, ACTIVE_IDX, VERSION_IDX, DATE_IDX, CHANGES_IDX = xrange(0, 5)

remote_url = "https://raw.githubusercontent.com/Zanzibar82/plugin.video.streamondemand/master/servers/"
local_folder = os.path.join(config.get_runtime_path(), "servers")


### Procedures

def update_servers():
    xml = scrapertools.cache_page(remote_url + "serverlist.xml")
    remote_dict = read_servers_list(xml)

    with open(os.path.join(local_folder, "serverlist.xml"), 'rb') as f:
        data = f.read()
    local_dict = read_servers_list(data)

    # ----------------------------
    import xbmcgui
    progress = xbmcgui.DialogProgressBG()
    progress.create("Update servers list")
    # ----------------------------

    for index, server_id in enumerate(remote_dict.iterkeys()):
        # ----------------------------
        percentage = index * 100 / len(remote_dict)
        # ----------------------------
        if server_id not in local_dict or remote_dict[server_id][VERSION_IDX] > local_dict[server_id][VERSION_IDX]:
            data = scrapertools.cache_page(remote_dict[server_id][UPDATE_URL_IDX])
            with open(os.path.join(local_folder, server_id + ".py"), 'wb') as f:
                f.write(data)

        # ----------------------------
        progress.update(percentage, ' Update server: ' + server_id)
        # ----------------------------

    with open(os.path.join(local_folder, "serverlist.xml"), 'wb') as f:
        f.write(xml)

    # ----------------------------
    progress.close()
    # ----------------------------


def read_servers_list(xml):
    ret = {}
    patron = r'<server>\s*<id>([^<]+)</id>\s*<update_url>([^<]+)</update_url>\s*<active>([^<]+)</active>\s*<version>([^<]+)</version>\s*<date>([^<]+)</date>\s*<changes>([^<]+)</changes>\s*</server>\s*'
    for server_id, update_url, active, version, date, changes in re.compile(patron).findall(xml):
        ret[server_id] = [update_url, active, int(version), date, changes]
    return ret


### Run

Thread(target=update_servers).start()
gpl-3.0
ThinkOpen-Solutions/odoo
addons/l10n_ae/__openerp__.py
337
1579
# -*- encoding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2014 Tech Receptives (<http://techreceptives.com>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
    'name': 'U.A.E. - Accounting',
    'version': '1.0',
    'author': 'Tech Receptives',
    'website': 'http://www.techreceptives.com',
    'category': 'Localization/Account Charts',
    'description': """
United Arab Emirates accounting chart and localization.
=======================================================
""",
    'depends': ['base', 'account', 'account_chart'],
    'demo': [
    ],
    'data': [
        'l10n_ae_chart.xml',
        'l10n_ae_wizard.xml',
    ],
    'installable': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
vivyly/terralope
terralope/urls.py
11
1024
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.conf import settings
from django.conf.urls import patterns, include, url
from django.conf.urls.static import static
from django.views.generic import TemplateView

# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()

urlpatterns = patterns('',
    url(r'^$',  # noqa
        TemplateView.as_view(template_name='pages/home.html'),
        name="home"),
    url(r'^about/$',
        TemplateView.as_view(template_name='pages/about.html'),
        name="about"),

    # Uncomment the next line to enable the admin:
    url(r'^admin/', include(admin.site.urls)),

    # User management
    url(r'^users/', include("users.urls", namespace="users")),
    url(r'^accounts/', include('allauth.urls')),

    # Uncomment the next line to enable avatars
    url(r'^avatar/', include('avatar.urls')),

    # Your stuff: custom urls go here

) + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
bsd-3-clause
WindCanDie/spark
python/pyspark/mllib/tests/test_linalg.py
7
25780
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import sys import array as pyarray import unittest from numpy import array, array_equal, zeros, arange, tile, ones, inf import pyspark.ml.linalg as newlinalg from pyspark.serializers import PickleSerializer from pyspark.mllib.linalg import Vector, SparseVector, DenseVector, VectorUDT, _convert_to_vector, \ DenseMatrix, SparseMatrix, Vectors, Matrices, MatrixUDT from pyspark.mllib.regression import LabeledPoint from pyspark.testing.mllibutils import MLlibTestCase from pyspark.testing.utils import have_scipy class VectorTests(MLlibTestCase): def _test_serialize(self, v): ser = PickleSerializer() self.assertEqual(v, ser.loads(ser.dumps(v))) jvec = self.sc._jvm.org.apache.spark.mllib.api.python.SerDe.loads(bytearray(ser.dumps(v))) nv = ser.loads(bytes(self.sc._jvm.org.apache.spark.mllib.api.python.SerDe.dumps(jvec))) self.assertEqual(v, nv) vs = [v] * 100 jvecs = self.sc._jvm.org.apache.spark.mllib.api.python.SerDe.loads(bytearray(ser.dumps(vs))) nvs = ser.loads(bytes(self.sc._jvm.org.apache.spark.mllib.api.python.SerDe.dumps(jvecs))) self.assertEqual(vs, nvs) def test_serialize(self): self._test_serialize(DenseVector(range(10))) self._test_serialize(DenseVector(array([1., 2., 3., 4.]))) self._test_serialize(DenseVector(pyarray.array('d', range(10)))) self._test_serialize(SparseVector(4, {1: 1, 3: 2})) self._test_serialize(SparseVector(3, {})) self._test_serialize(DenseMatrix(2, 3, range(6))) sm1 = SparseMatrix( 3, 4, [0, 2, 2, 4, 4], [1, 2, 1, 2], [1.0, 2.0, 4.0, 5.0]) self._test_serialize(sm1) def test_dot(self): sv = SparseVector(4, {1: 1, 3: 2}) dv = DenseVector(array([1., 2., 3., 4.])) lst = DenseVector([1, 2, 3, 4]) mat = array([[1., 2., 3., 4.], [1., 2., 3., 4.], [1., 2., 3., 4.], [1., 2., 3., 4.]]) arr = pyarray.array('d', [0, 1, 2, 3]) self.assertEqual(10.0, sv.dot(dv)) self.assertTrue(array_equal(array([3., 6., 9., 12.]), sv.dot(mat))) self.assertEqual(30.0, dv.dot(dv)) self.assertTrue(array_equal(array([10., 20., 30., 40.]), dv.dot(mat))) self.assertEqual(30.0, lst.dot(dv)) self.assertTrue(array_equal(array([10., 20., 30., 40.]), lst.dot(mat))) self.assertEqual(7.0, sv.dot(arr)) def test_squared_distance(self): def squared_distance(a, b): if isinstance(a, Vector): return a.squared_distance(b) else: return b.squared_distance(a) sv = SparseVector(4, {1: 1, 3: 2}) dv = DenseVector(array([1., 2., 3., 4.])) lst = DenseVector([4, 3, 2, 1]) lst1 = [4, 3, 2, 1] arr = pyarray.array('d', [0, 2, 1, 3]) narr = array([0, 2, 1, 3]) self.assertEqual(15.0, squared_distance(sv, dv)) self.assertEqual(25.0, squared_distance(sv, lst)) self.assertEqual(20.0, squared_distance(dv, lst)) self.assertEqual(15.0, squared_distance(dv, sv)) self.assertEqual(25.0, squared_distance(lst, sv)) self.assertEqual(20.0, squared_distance(lst, dv)) 
self.assertEqual(0.0, squared_distance(sv, sv)) self.assertEqual(0.0, squared_distance(dv, dv)) self.assertEqual(0.0, squared_distance(lst, lst)) self.assertEqual(25.0, squared_distance(sv, lst1)) self.assertEqual(3.0, squared_distance(sv, arr)) self.assertEqual(3.0, squared_distance(sv, narr)) def test_hash(self): v1 = DenseVector([0.0, 1.0, 0.0, 5.5]) v2 = SparseVector(4, [(1, 1.0), (3, 5.5)]) v3 = DenseVector([0.0, 1.0, 0.0, 5.5]) v4 = SparseVector(4, [(1, 1.0), (3, 2.5)]) self.assertEqual(hash(v1), hash(v2)) self.assertEqual(hash(v1), hash(v3)) self.assertEqual(hash(v2), hash(v3)) self.assertFalse(hash(v1) == hash(v4)) self.assertFalse(hash(v2) == hash(v4)) def test_eq(self): v1 = DenseVector([0.0, 1.0, 0.0, 5.5]) v2 = SparseVector(4, [(1, 1.0), (3, 5.5)]) v3 = DenseVector([0.0, 1.0, 0.0, 5.5]) v4 = SparseVector(6, [(1, 1.0), (3, 5.5)]) v5 = DenseVector([0.0, 1.0, 0.0, 2.5]) v6 = SparseVector(4, [(1, 1.0), (3, 2.5)]) self.assertEqual(v1, v2) self.assertEqual(v1, v3) self.assertFalse(v2 == v4) self.assertFalse(v1 == v5) self.assertFalse(v1 == v6) def test_equals(self): indices = [1, 2, 4] values = [1., 3., 2.] self.assertTrue(Vectors._equals(indices, values, list(range(5)), [0., 1., 3., 0., 2.])) self.assertFalse(Vectors._equals(indices, values, list(range(5)), [0., 3., 1., 0., 2.])) self.assertFalse(Vectors._equals(indices, values, list(range(5)), [0., 3., 0., 2.])) self.assertFalse(Vectors._equals(indices, values, list(range(5)), [0., 1., 3., 2., 2.])) def test_conversion(self): # numpy arrays should be automatically upcast to float64 # tests for fix of [SPARK-5089] v = array([1, 2, 3, 4], dtype='float64') dv = DenseVector(v) self.assertTrue(dv.array.dtype == 'float64') v = array([1, 2, 3, 4], dtype='float32') dv = DenseVector(v) self.assertTrue(dv.array.dtype == 'float64') def test_sparse_vector_indexing(self): sv = SparseVector(5, {1: 1, 3: 2}) self.assertEqual(sv[0], 0.) self.assertEqual(sv[3], 2.) self.assertEqual(sv[1], 1.) self.assertEqual(sv[2], 0.) self.assertEqual(sv[4], 0.) self.assertEqual(sv[-1], 0.) self.assertEqual(sv[-2], 2.) self.assertEqual(sv[-3], 0.) self.assertEqual(sv[-5], 0.) 
for ind in [5, -6]: self.assertRaises(IndexError, sv.__getitem__, ind) for ind in [7.8, '1']: self.assertRaises(TypeError, sv.__getitem__, ind) zeros = SparseVector(4, {}) self.assertEqual(zeros[0], 0.0) self.assertEqual(zeros[3], 0.0) for ind in [4, -5]: self.assertRaises(IndexError, zeros.__getitem__, ind) empty = SparseVector(0, {}) for ind in [-1, 0, 1]: self.assertRaises(IndexError, empty.__getitem__, ind) def test_sparse_vector_iteration(self): self.assertListEqual(list(SparseVector(3, [], [])), [0.0, 0.0, 0.0]) self.assertListEqual(list(SparseVector(5, [0, 3], [1.0, 2.0])), [1.0, 0.0, 0.0, 2.0, 0.0]) def test_matrix_indexing(self): mat = DenseMatrix(3, 2, [0, 1, 4, 6, 8, 10]) expected = [[0, 6], [1, 8], [4, 10]] for i in range(3): for j in range(2): self.assertEqual(mat[i, j], expected[i][j]) for i, j in [(-1, 0), (4, 1), (3, 4)]: self.assertRaises(IndexError, mat.__getitem__, (i, j)) def test_repr_dense_matrix(self): mat = DenseMatrix(3, 2, [0, 1, 4, 6, 8, 10]) self.assertTrue( repr(mat), 'DenseMatrix(3, 2, [0.0, 1.0, 4.0, 6.0, 8.0, 10.0], False)') mat = DenseMatrix(3, 2, [0, 1, 4, 6, 8, 10], True) self.assertTrue( repr(mat), 'DenseMatrix(3, 2, [0.0, 1.0, 4.0, 6.0, 8.0, 10.0], False)') mat = DenseMatrix(6, 3, zeros(18)) self.assertTrue( repr(mat), 'DenseMatrix(6, 3, [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ..., \ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], False)') def test_repr_sparse_matrix(self): sm1t = SparseMatrix( 3, 4, [0, 2, 3, 5], [0, 1, 2, 0, 2], [3.0, 2.0, 4.0, 9.0, 8.0], isTransposed=True) self.assertTrue( repr(sm1t), 'SparseMatrix(3, 4, [0, 2, 3, 5], [0, 1, 2, 0, 2], [3.0, 2.0, 4.0, 9.0, 8.0], True)') indices = tile(arange(6), 3) values = ones(18) sm = SparseMatrix(6, 3, [0, 6, 12, 18], indices, values) self.assertTrue( repr(sm), "SparseMatrix(6, 3, [0, 6, 12, 18], \ [0, 1, 2, 3, 4, 5, 0, 1, ..., 4, 5, 0, 1, 2, 3, 4, 5], \ [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, ..., \ 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], False)") self.assertTrue( str(sm), "6 X 3 CSCMatrix\n\ (0,0) 1.0\n(1,0) 1.0\n(2,0) 1.0\n(3,0) 1.0\n(4,0) 1.0\n(5,0) 1.0\n\ (0,1) 1.0\n(1,1) 1.0\n(2,1) 1.0\n(3,1) 1.0\n(4,1) 1.0\n(5,1) 1.0\n\ (0,2) 1.0\n(1,2) 1.0\n(2,2) 1.0\n(3,2) 1.0\n..\n..") sm = SparseMatrix(1, 18, zeros(19), [], []) self.assertTrue( repr(sm), 'SparseMatrix(1, 18, \ [0, 0, 0, 0, 0, 0, 0, 0, ..., 0, 0, 0, 0, 0, 0, 0, 0], [], [], False)') def test_sparse_matrix(self): # Test sparse matrix creation. sm1 = SparseMatrix( 3, 4, [0, 2, 2, 4, 4], [1, 2, 1, 2], [1.0, 2.0, 4.0, 5.0]) self.assertEqual(sm1.numRows, 3) self.assertEqual(sm1.numCols, 4) self.assertEqual(sm1.colPtrs.tolist(), [0, 2, 2, 4, 4]) self.assertEqual(sm1.rowIndices.tolist(), [1, 2, 1, 2]) self.assertEqual(sm1.values.tolist(), [1.0, 2.0, 4.0, 5.0]) self.assertTrue( repr(sm1), 'SparseMatrix(3, 4, [0, 2, 2, 4, 4], [1, 2, 1, 2], [1.0, 2.0, 4.0, 5.0], False)') # Test indexing expected = [ [0, 0, 0, 0], [1, 0, 4, 0], [2, 0, 5, 0]] for i in range(3): for j in range(4): self.assertEqual(expected[i][j], sm1[i, j]) self.assertTrue(array_equal(sm1.toArray(), expected)) for i, j in [(-1, 1), (4, 3), (3, 5)]: self.assertRaises(IndexError, sm1.__getitem__, (i, j)) # Test conversion to dense and sparse. 
smnew = sm1.toDense().toSparse() self.assertEqual(sm1.numRows, smnew.numRows) self.assertEqual(sm1.numCols, smnew.numCols) self.assertTrue(array_equal(sm1.colPtrs, smnew.colPtrs)) self.assertTrue(array_equal(sm1.rowIndices, smnew.rowIndices)) self.assertTrue(array_equal(sm1.values, smnew.values)) sm1t = SparseMatrix( 3, 4, [0, 2, 3, 5], [0, 1, 2, 0, 2], [3.0, 2.0, 4.0, 9.0, 8.0], isTransposed=True) self.assertEqual(sm1t.numRows, 3) self.assertEqual(sm1t.numCols, 4) self.assertEqual(sm1t.colPtrs.tolist(), [0, 2, 3, 5]) self.assertEqual(sm1t.rowIndices.tolist(), [0, 1, 2, 0, 2]) self.assertEqual(sm1t.values.tolist(), [3.0, 2.0, 4.0, 9.0, 8.0]) expected = [ [3, 2, 0, 0], [0, 0, 4, 0], [9, 0, 8, 0]] for i in range(3): for j in range(4): self.assertEqual(expected[i][j], sm1t[i, j]) self.assertTrue(array_equal(sm1t.toArray(), expected)) def test_dense_matrix_is_transposed(self): mat1 = DenseMatrix(3, 2, [0, 4, 1, 6, 3, 9], isTransposed=True) mat = DenseMatrix(3, 2, [0, 1, 3, 4, 6, 9]) self.assertEqual(mat1, mat) expected = [[0, 4], [1, 6], [3, 9]] for i in range(3): for j in range(2): self.assertEqual(mat1[i, j], expected[i][j]) self.assertTrue(array_equal(mat1.toArray(), expected)) sm = mat1.toSparse() self.assertTrue(array_equal(sm.rowIndices, [1, 2, 0, 1, 2])) self.assertTrue(array_equal(sm.colPtrs, [0, 2, 5])) self.assertTrue(array_equal(sm.values, [1, 3, 4, 6, 9])) def test_parse_vector(self): a = DenseVector([]) self.assertEqual(str(a), '[]') self.assertEqual(Vectors.parse(str(a)), a) a = DenseVector([3, 4, 6, 7]) self.assertEqual(str(a), '[3.0,4.0,6.0,7.0]') self.assertEqual(Vectors.parse(str(a)), a) a = SparseVector(4, [], []) self.assertEqual(str(a), '(4,[],[])') self.assertEqual(SparseVector.parse(str(a)), a) a = SparseVector(4, [0, 2], [3, 4]) self.assertEqual(str(a), '(4,[0,2],[3.0,4.0])') self.assertEqual(Vectors.parse(str(a)), a) a = SparseVector(10, [0, 1], [4, 5]) self.assertEqual(SparseVector.parse(' (10, [0,1 ],[ 4.0,5.0] )'), a) def test_norms(self): a = DenseVector([0, 2, 3, -1]) self.assertAlmostEqual(a.norm(2), 3.742, 3) self.assertTrue(a.norm(1), 6) self.assertTrue(a.norm(inf), 3) a = SparseVector(4, [0, 2], [3, -4]) self.assertAlmostEqual(a.norm(2), 5) self.assertTrue(a.norm(1), 7) self.assertTrue(a.norm(inf), 4) tmp = SparseVector(4, [0, 2], [3, 0]) self.assertEqual(tmp.numNonzeros(), 1) def test_ml_mllib_vector_conversion(self): # to ml # dense mllibDV = Vectors.dense([1, 2, 3]) mlDV1 = newlinalg.Vectors.dense([1, 2, 3]) mlDV2 = mllibDV.asML() self.assertEqual(mlDV2, mlDV1) # sparse mllibSV = Vectors.sparse(4, {1: 1.0, 3: 5.5}) mlSV1 = newlinalg.Vectors.sparse(4, {1: 1.0, 3: 5.5}) mlSV2 = mllibSV.asML() self.assertEqual(mlSV2, mlSV1) # from ml # dense mllibDV1 = Vectors.dense([1, 2, 3]) mlDV = newlinalg.Vectors.dense([1, 2, 3]) mllibDV2 = Vectors.fromML(mlDV) self.assertEqual(mllibDV1, mllibDV2) # sparse mllibSV1 = Vectors.sparse(4, {1: 1.0, 3: 5.5}) mlSV = newlinalg.Vectors.sparse(4, {1: 1.0, 3: 5.5}) mllibSV2 = Vectors.fromML(mlSV) self.assertEqual(mllibSV1, mllibSV2) def test_ml_mllib_matrix_conversion(self): # to ml # dense mllibDM = Matrices.dense(2, 2, [0, 1, 2, 3]) mlDM1 = newlinalg.Matrices.dense(2, 2, [0, 1, 2, 3]) mlDM2 = mllibDM.asML() self.assertEqual(mlDM2, mlDM1) # transposed mllibDMt = DenseMatrix(2, 2, [0, 1, 2, 3], True) mlDMt1 = newlinalg.DenseMatrix(2, 2, [0, 1, 2, 3], True) mlDMt2 = mllibDMt.asML() self.assertEqual(mlDMt2, mlDMt1) # sparse mllibSM = Matrices.sparse(2, 2, [0, 2, 3], [0, 1, 1], [2, 3, 4]) mlSM1 = newlinalg.Matrices.sparse(2, 2, [0, 2, 
3], [0, 1, 1], [2, 3, 4]) mlSM2 = mllibSM.asML() self.assertEqual(mlSM2, mlSM1) # transposed mllibSMt = SparseMatrix(2, 2, [0, 2, 3], [0, 1, 1], [2, 3, 4], True) mlSMt1 = newlinalg.SparseMatrix(2, 2, [0, 2, 3], [0, 1, 1], [2, 3, 4], True) mlSMt2 = mllibSMt.asML() self.assertEqual(mlSMt2, mlSMt1) # from ml # dense mllibDM1 = Matrices.dense(2, 2, [1, 2, 3, 4]) mlDM = newlinalg.Matrices.dense(2, 2, [1, 2, 3, 4]) mllibDM2 = Matrices.fromML(mlDM) self.assertEqual(mllibDM1, mllibDM2) # transposed mllibDMt1 = DenseMatrix(2, 2, [1, 2, 3, 4], True) mlDMt = newlinalg.DenseMatrix(2, 2, [1, 2, 3, 4], True) mllibDMt2 = Matrices.fromML(mlDMt) self.assertEqual(mllibDMt1, mllibDMt2) # sparse mllibSM1 = Matrices.sparse(2, 2, [0, 2, 3], [0, 1, 1], [2, 3, 4]) mlSM = newlinalg.Matrices.sparse(2, 2, [0, 2, 3], [0, 1, 1], [2, 3, 4]) mllibSM2 = Matrices.fromML(mlSM) self.assertEqual(mllibSM1, mllibSM2) # transposed mllibSMt1 = SparseMatrix(2, 2, [0, 2, 3], [0, 1, 1], [2, 3, 4], True) mlSMt = newlinalg.SparseMatrix(2, 2, [0, 2, 3], [0, 1, 1], [2, 3, 4], True) mllibSMt2 = Matrices.fromML(mlSMt) self.assertEqual(mllibSMt1, mllibSMt2) class VectorUDTTests(MLlibTestCase): dv0 = DenseVector([]) dv1 = DenseVector([1.0, 2.0]) sv0 = SparseVector(2, [], []) sv1 = SparseVector(2, [1], [2.0]) udt = VectorUDT() def test_json_schema(self): self.assertEqual(VectorUDT.fromJson(self.udt.jsonValue()), self.udt) def test_serialization(self): for v in [self.dv0, self.dv1, self.sv0, self.sv1]: self.assertEqual(v, self.udt.deserialize(self.udt.serialize(v))) def test_infer_schema(self): rdd = self.sc.parallelize([LabeledPoint(1.0, self.dv1), LabeledPoint(0.0, self.sv1)]) df = rdd.toDF() schema = df.schema field = [f for f in schema.fields if f.name == "features"][0] self.assertEqual(field.dataType, self.udt) vectors = df.rdd.map(lambda p: p.features).collect() self.assertEqual(len(vectors), 2) for v in vectors: if isinstance(v, SparseVector): self.assertEqual(v, self.sv1) elif isinstance(v, DenseVector): self.assertEqual(v, self.dv1) else: raise TypeError("expecting a vector but got %r of type %r" % (v, type(v))) class MatrixUDTTests(MLlibTestCase): dm1 = DenseMatrix(3, 2, [0, 1, 4, 5, 9, 10]) dm2 = DenseMatrix(3, 2, [0, 1, 4, 5, 9, 10], isTransposed=True) sm1 = SparseMatrix(1, 1, [0, 1], [0], [2.0]) sm2 = SparseMatrix(2, 1, [0, 0, 1], [0], [5.0], isTransposed=True) udt = MatrixUDT() def test_json_schema(self): self.assertEqual(MatrixUDT.fromJson(self.udt.jsonValue()), self.udt) def test_serialization(self): for m in [self.dm1, self.dm2, self.sm1, self.sm2]: self.assertEqual(m, self.udt.deserialize(self.udt.serialize(m))) def test_infer_schema(self): rdd = self.sc.parallelize([("dense", self.dm1), ("sparse", self.sm1)]) df = rdd.toDF() schema = df.schema self.assertTrue(schema.fields[1].dataType, self.udt) matrices = df.rdd.map(lambda x: x._2).collect() self.assertEqual(len(matrices), 2) for m in matrices: if isinstance(m, DenseMatrix): self.assertTrue(m, self.dm1) elif isinstance(m, SparseMatrix): self.assertTrue(m, self.sm1) else: raise ValueError("Expected a matrix but got type %r" % type(m)) @unittest.skipIf(not have_scipy, "SciPy not installed") class SciPyTests(MLlibTestCase): """ Test both vector operations and MLlib algorithms with SciPy sparse matrices, if SciPy is available. 
""" def test_serialize(self): from scipy.sparse import lil_matrix ser = PickleSerializer() lil = lil_matrix((4, 1)) lil[1, 0] = 1 lil[3, 0] = 2 sv = SparseVector(4, {1: 1, 3: 2}) self.assertEqual(sv, _convert_to_vector(lil)) self.assertEqual(sv, _convert_to_vector(lil.tocsc())) self.assertEqual(sv, _convert_to_vector(lil.tocoo())) self.assertEqual(sv, _convert_to_vector(lil.tocsr())) self.assertEqual(sv, _convert_to_vector(lil.todok())) def serialize(l): return ser.loads(ser.dumps(_convert_to_vector(l))) self.assertEqual(sv, serialize(lil)) self.assertEqual(sv, serialize(lil.tocsc())) self.assertEqual(sv, serialize(lil.tocsr())) self.assertEqual(sv, serialize(lil.todok())) def test_convert_to_vector(self): from scipy.sparse import csc_matrix # Create a CSC matrix with non-sorted indices indptr = array([0, 2]) indices = array([3, 1]) data = array([2.0, 1.0]) csc = csc_matrix((data, indices, indptr)) self.assertFalse(csc.has_sorted_indices) sv = SparseVector(4, {1: 1, 3: 2}) self.assertEqual(sv, _convert_to_vector(csc)) def test_dot(self): from scipy.sparse import lil_matrix lil = lil_matrix((4, 1)) lil[1, 0] = 1 lil[3, 0] = 2 dv = DenseVector(array([1., 2., 3., 4.])) self.assertEqual(10.0, dv.dot(lil)) def test_squared_distance(self): from scipy.sparse import lil_matrix lil = lil_matrix((4, 1)) lil[1, 0] = 3 lil[3, 0] = 2 dv = DenseVector(array([1., 2., 3., 4.])) sv = SparseVector(4, {0: 1, 1: 2, 2: 3, 3: 4}) self.assertEqual(15.0, dv.squared_distance(lil)) self.assertEqual(15.0, sv.squared_distance(lil)) def scipy_matrix(self, size, values): """Create a column SciPy matrix from a dictionary of values""" from scipy.sparse import lil_matrix lil = lil_matrix((size, 1)) for key, value in values.items(): lil[key, 0] = value return lil def test_clustering(self): from pyspark.mllib.clustering import KMeans data = [ self.scipy_matrix(3, {1: 1.0}), self.scipy_matrix(3, {1: 1.1}), self.scipy_matrix(3, {2: 1.0}), self.scipy_matrix(3, {2: 1.1}) ] clusters = KMeans.train(self.sc.parallelize(data), 2, initializationMode="k-means||") self.assertEqual(clusters.predict(data[0]), clusters.predict(data[1])) self.assertEqual(clusters.predict(data[2]), clusters.predict(data[3])) def test_classification(self): from pyspark.mllib.classification import LogisticRegressionWithSGD, SVMWithSGD, NaiveBayes from pyspark.mllib.tree import DecisionTree data = [ LabeledPoint(0.0, self.scipy_matrix(2, {0: 1.0})), LabeledPoint(1.0, self.scipy_matrix(2, {1: 1.0})), LabeledPoint(0.0, self.scipy_matrix(2, {0: 2.0})), LabeledPoint(1.0, self.scipy_matrix(2, {1: 2.0})) ] rdd = self.sc.parallelize(data) features = [p.features for p in data] lr_model = LogisticRegressionWithSGD.train(rdd) self.assertTrue(lr_model.predict(features[0]) <= 0) self.assertTrue(lr_model.predict(features[1]) > 0) self.assertTrue(lr_model.predict(features[2]) <= 0) self.assertTrue(lr_model.predict(features[3]) > 0) svm_model = SVMWithSGD.train(rdd) self.assertTrue(svm_model.predict(features[0]) <= 0) self.assertTrue(svm_model.predict(features[1]) > 0) self.assertTrue(svm_model.predict(features[2]) <= 0) self.assertTrue(svm_model.predict(features[3]) > 0) nb_model = NaiveBayes.train(rdd) self.assertTrue(nb_model.predict(features[0]) <= 0) self.assertTrue(nb_model.predict(features[1]) > 0) self.assertTrue(nb_model.predict(features[2]) <= 0) self.assertTrue(nb_model.predict(features[3]) > 0) categoricalFeaturesInfo = {0: 3} # feature 0 has 3 categories dt_model = DecisionTree.trainClassifier(rdd, numClasses=2, categoricalFeaturesInfo=categoricalFeaturesInfo) 
self.assertTrue(dt_model.predict(features[0]) <= 0) self.assertTrue(dt_model.predict(features[1]) > 0) self.assertTrue(dt_model.predict(features[2]) <= 0) self.assertTrue(dt_model.predict(features[3]) > 0) def test_regression(self): from pyspark.mllib.regression import LinearRegressionWithSGD, LassoWithSGD, \ RidgeRegressionWithSGD from pyspark.mllib.tree import DecisionTree data = [ LabeledPoint(-1.0, self.scipy_matrix(2, {1: -1.0})), LabeledPoint(1.0, self.scipy_matrix(2, {1: 1.0})), LabeledPoint(-1.0, self.scipy_matrix(2, {1: -2.0})), LabeledPoint(1.0, self.scipy_matrix(2, {1: 2.0})) ] rdd = self.sc.parallelize(data) features = [p.features for p in data] lr_model = LinearRegressionWithSGD.train(rdd) self.assertTrue(lr_model.predict(features[0]) <= 0) self.assertTrue(lr_model.predict(features[1]) > 0) self.assertTrue(lr_model.predict(features[2]) <= 0) self.assertTrue(lr_model.predict(features[3]) > 0) lasso_model = LassoWithSGD.train(rdd) self.assertTrue(lasso_model.predict(features[0]) <= 0) self.assertTrue(lasso_model.predict(features[1]) > 0) self.assertTrue(lasso_model.predict(features[2]) <= 0) self.assertTrue(lasso_model.predict(features[3]) > 0) rr_model = RidgeRegressionWithSGD.train(rdd) self.assertTrue(rr_model.predict(features[0]) <= 0) self.assertTrue(rr_model.predict(features[1]) > 0) self.assertTrue(rr_model.predict(features[2]) <= 0) self.assertTrue(rr_model.predict(features[3]) > 0) categoricalFeaturesInfo = {0: 2} # feature 0 has 2 categories dt_model = DecisionTree.trainRegressor(rdd, categoricalFeaturesInfo=categoricalFeaturesInfo) self.assertTrue(dt_model.predict(features[0]) <= 0) self.assertTrue(dt_model.predict(features[1]) > 0) self.assertTrue(dt_model.predict(features[2]) <= 0) self.assertTrue(dt_model.predict(features[3]) > 0) if __name__ == "__main__": from pyspark.mllib.tests.test_linalg import * try: import xmlrunner testRunner = xmlrunner.XMLTestRunner(output='target/test-reports') except ImportError: testRunner = None unittest.main(testRunner=testRunner, verbosity=2)
apache-2.0
nesterione/scikit-learn
benchmarks/bench_plot_fastkmeans.py
294
4676
from __future__ import print_function

from collections import defaultdict
from time import time

import numpy as np
from numpy import random as nr

from sklearn.cluster.k_means_ import KMeans, MiniBatchKMeans


def compute_bench(samples_range, features_range):

    it = 0
    results = defaultdict(lambda: [])
    chunk = 100

    max_it = len(samples_range) * len(features_range)

    for n_samples in samples_range:
        for n_features in features_range:
            it += 1
            print('==============================')
            print('Iteration %03d of %03d' % (it, max_it))
            print('==============================')
            print()
            data = nr.random_integers(-50, 50, (n_samples, n_features))

            print('K-Means')
            tstart = time()
            kmeans = KMeans(init='k-means++', n_clusters=10).fit(data)

            delta = time() - tstart
            print("Speed: %0.3fs" % delta)
            print("Inertia: %0.5f" % kmeans.inertia_)
            print()

            results['kmeans_speed'].append(delta)
            results['kmeans_quality'].append(kmeans.inertia_)

            print('Fast K-Means')
            # let's prepare the data in small chunks
            mbkmeans = MiniBatchKMeans(init='k-means++',
                                       n_clusters=10,
                                       batch_size=chunk)
            tstart = time()
            mbkmeans.fit(data)
            delta = time() - tstart
            print("Speed: %0.3fs" % delta)
            print("Inertia: %f" % mbkmeans.inertia_)
            print()
            print()

            results['MiniBatchKMeans Speed'].append(delta)
            results['MiniBatchKMeans Quality'].append(mbkmeans.inertia_)

    return results


def compute_bench_2(chunks):
    results = defaultdict(lambda: [])
    n_features = 50000
    means = np.array([[1, 1], [-1, -1], [1, -1], [-1, 1],
                      [0.5, 0.5], [0.75, -0.5], [-1, 0.75], [1, 0]])
    X = np.empty((0, 2))
    for i in range(8):
        X = np.r_[X, means[i] + 0.8 * np.random.randn(n_features, 2)]

    max_it = len(chunks)
    it = 0
    for chunk in chunks:
        it += 1
        print('==============================')
        print('Iteration %03d of %03d' % (it, max_it))
        print('==============================')
        print()

        print('Fast K-Means')
        tstart = time()
        mbkmeans = MiniBatchKMeans(init='k-means++',
                                   n_clusters=8,
                                   batch_size=chunk)

        mbkmeans.fit(X)
        delta = time() - tstart
        print("Speed: %0.3fs" % delta)
        print("Inertia: %0.3fs" % mbkmeans.inertia_)
        print()

        results['MiniBatchKMeans Speed'].append(delta)
        results['MiniBatchKMeans Quality'].append(mbkmeans.inertia_)

    return results


if __name__ == '__main__':
    from mpl_toolkits.mplot3d import axes3d  # register the 3d projection
    import matplotlib.pyplot as plt

    samples_range = np.linspace(50, 150, 5).astype(np.int)
    features_range = np.linspace(150, 50000, 5).astype(np.int)
    chunks = np.linspace(500, 10000, 15).astype(np.int)

    results = compute_bench(samples_range, features_range)
    results_2 = compute_bench_2(chunks)

    max_time = max([max(i) for i in [t for (label, t) in results.iteritems()
                                     if "speed" in label]])
    max_inertia = max([max(i) for i in [
        t for (label, t) in results.iteritems()
        if "speed" not in label]])

    fig = plt.figure('scikit-learn K-Means benchmark results')
    for c, (label, timings) in zip('brcy',
                                   sorted(results.iteritems())):
        if 'speed' in label:
            ax = fig.add_subplot(2, 2, 1, projection='3d')
            ax.set_zlim3d(0.0, max_time * 1.1)
        else:
            ax = fig.add_subplot(2, 2, 2, projection='3d')
            ax.set_zlim3d(0.0, max_inertia * 1.1)

        X, Y = np.meshgrid(samples_range, features_range)
        Z = np.asarray(timings).reshape(samples_range.shape[0],
                                        features_range.shape[0])
        ax.plot_surface(X, Y, Z.T, cstride=1, rstride=1, color=c, alpha=0.5)
        ax.set_xlabel('n_samples')
        ax.set_ylabel('n_features')

    i = 0
    for c, (label, timings) in zip('br',
                                   sorted(results_2.iteritems())):
        i += 1
        ax = fig.add_subplot(2, 2, i + 2)
        y = np.asarray(timings)
        ax.plot(chunks, y, color=c, alpha=0.8)
        ax.set_xlabel('Chunks')
        ax.set_ylabel(label)

    plt.show()
bsd-3-clause
thaim/ansible
lib/ansible/modules/network/f5/bigip_firewall_rule.py
21
43698
#!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright: (c) 2018, F5 Networks Inc. # GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'certified'} DOCUMENTATION = r''' --- module: bigip_firewall_rule short_description: Manage AFM Firewall rules description: - Manages firewall rules in an AFM firewall policy. New rules will always be added to the end of the policy. Rules can be re-ordered using the C(bigip_security_policy) module. Rules can also be pre-ordered using the C(bigip_security_policy) module and then later updated using the C(bigip_firewall_rule) module. version_added: 2.7 options: name: description: - Specifies the name of the rule. type: str required: True parent_policy: description: - The policy which contains the rule to be managed. - One of either C(parent_policy) or C(parent_rule_list) is required. type: str parent_rule_list: description: - The rule list which contains the rule to be managed. - One of either C(parent_policy) or C(parent_rule_list) is required. type: str action: description: - Specifies the action for the firewall rule. - When C(accept), allows packets with the specified source, destination, and protocol to pass through the firewall. Packets that match the rule, and are accepted, traverse the system as if the firewall is not present. - When C(drop), drops packets with the specified source, destination, and protocol. Dropping a packet is a silent action with no notification to the source or destination systems. Dropping the packet causes the connection to be retried until the retry threshold is reached. - When C(reject), rejects packets with the specified source, destination, and protocol. When a packet is rejected the firewall sends a destination unreachable message to the sender. - When C(accept-decisively), allows packets with the specified source, destination, and protocol to pass through the firewall, and does not require any further processing by any of the further firewalls. Packets that match the rule, and are accepted, traverse the system as if the firewall is not present. If the Rule List is applied to a virtual server, management IP, or self IP firewall rule, then Accept Decisively is equivalent to Accept. - When creating a new rule, if this parameter is not provided, the default is C(reject). type: str choices: - accept - drop - reject - accept-decisively status: description: - Indicates the activity state of the rule or rule list. - When C(disabled), specifies that the rule or rule list does not apply at all. - When C(enabled), specifies that the system applies the firewall rule or rule list to the given context and addresses. - When C(scheduled), specifies that the system applies the rule or rule list according to the specified schedule. - When creating a new rule, if this parameter is not provided, the default is C(enabled). type: str choices: - enabled - disabled - scheduled schedule: description: - Specifies a schedule for the firewall rule. - You configure schedules to define days and times when the firewall rule is made active. type: str description: description: - The rule description. type: str irule: description: - Specifies an iRule that is applied to the firewall rule. - An iRule can be started when the firewall rule matches traffic. type: str protocol: description: - Specifies the protocol to which the rule applies. 
- Protocols may be specified by either their name or numeric value. - A special protocol value C(any) can be specified to match any protocol. The numeric equivalent of this protocol is C(255). type: str source: description: - Specifies packet sources to which the rule applies. - Leaving this field blank applies the rule to all addresses and all ports. - You can specify the following source items. An IPv4 or IPv6 address, an IPv4 or IPv6 address range, geographic location, VLAN, address list, port, port range, port list or address list. - You can specify a mix of different types of items for the source address. suboptions: address: description: - Specifies a specific IP address. type: str address_list: description: - Specifies an existing address list. type: str address_range: description: - Specifies an address range. type: str country: description: - Specifies a country code. type: str port: description: - Specifies a single numeric port. - This option is only valid when C(protocol) is C(tcp)(6) or C(udp)(17). type: int port_list: description: - Specifes an existing port list. - This option is only valid when C(protocol) is C(tcp)(6) or C(udp)(17). type: str port_range: description: - Specifies a range of ports, which is two port values separated by a hyphen. The port to the left of the hyphen should be less than the port to the right. - This option is only valid when C(protocol) is C(tcp)(6) or C(udp)(17). type: str vlan: description: - Specifies VLANs to which the rule applies. - The VLAN source refers to the packet's source. type: str type: list destination: description: - Specifies packet destinations to which the rule applies. - Leaving this field blank applies the rule to all addresses and all ports. - You can specify the following destination items. An IPv4 or IPv6 address, an IPv4 or IPv6 address range, geographic location, VLAN, address list, port, port range, port list or address list. - You can specify a mix of different types of items for the source address. suboptions: address: description: - Specifies a specific IP address. type: str address_list: description: - Specifies an existing address list. type: str address_range: description: - Specifies an address range. type: str country: description: - Specifies a country code. type: str port: description: - Specifies a single numeric port. - This option is only valid when C(protocol) is C(tcp)(6) or C(udp)(17). type: int port_list: description: - Specifes an existing port list. - This option is only valid when C(protocol) is C(tcp)(6) or C(udp)(17). type: str port_range: description: - Specifies a range of ports, which is two port values separated by a hyphen. The port to the left of the hyphen should be less than the port to the right. - This option is only valid when C(protocol) is C(tcp)(6) or C(udp)(17). type: str type: list logging: description: - Specifies whether logging is enabled or disabled for the firewall rule. - When creating a new rule, if this parameter is not specified, the default if C(no). type: bool rule_list: description: - Specifies an existing rule list to use in the rule. - This parameter is mutually exclusive with many of the other individual-rule specific settings. This includes C(logging), C(action), C(source), C(destination), C(irule'), C(protocol) and C(logging). - This parameter is only used when C(parent_policy) is specified, otherwise it is ignored. type: str icmp_message: description: - Specifies the Internet Control Message Protocol (ICMP) or ICMPv6 message C(type) and C(code) that the rule uses. 
- This parameter is only relevant when C(protocol) is either C(icmp)(1) or C(icmpv6)(58). suboptions: type: description: - Specifies the type of ICMP message. - You can specify control messages, such as Echo Reply (0) and Destination Unreachable (3), or you can specify C(any) to indicate that the system applies the rule for all ICMP messages. - You can also specify an arbitrary ICMP message. - The ICMP protocol contains definitions for the existing message type and number pairs. type: str code: description: - Specifies the code returned in response to the specified ICMP message type. - You can specify codes, each set appropriate to the associated type, such as No Code (0) (associated with Echo Reply (0)) and Host Unreachable (1) (associated with Destination Unreachable (3)), or you can specify C(any) to indicate that the system applies the rule for all codes in response to that specific ICMP message. - You can also specify an arbitrary code. - The ICMP protocol contains definitions for the existing message code and number pairs. type: str type: list partition: description: - Device partition to manage resources on. type: str default: Common state: description: - When C(state) is C(present), ensures that the rule exists. - When C(state) is C(absent), ensures that the rule is removed. type: str choices: - present - absent default: present extends_documentation_fragment: f5 author: - Tim Rupp (@caphrim007) - Wojciech Wypior (@wojtek0806) ''' EXAMPLES = r''' - name: Create a new rule in the foo firewall policy bigip_firewall_rule: name: foo parent_policy: policy1 protocol: tcp source: - address: 1.2.3.4 - address: "::1" - address_list: foo-list1 - address_range: 1.1.1.1-2.2.2.2 - vlan: vlan1 - country: US - port: 22 - port_list: port-list1 - port_range: 80-443 destination: - address: 1.2.3.4 - address: "::1" - address_list: foo-list1 - address_range: 1.1.1.1-2.2.2.2 - country: US - port: 22 - port_list: port-list1 - port_range: 80-443 irule: irule1 action: accept logging: yes provider: password: secret server: lb.mydomain.com user: admin delegate_to: localhost - name: Create an ICMP specific rule bigip_firewall_rule: name: foo protocol: icmp icmp_message: type: 0 source: - country: US action: drop logging: yes provider: password: secret server: lb.mydomain.com user: admin delegate_to: localhost - name: Add a new policy rule that uses an existing rule list bigip_firewall_rule: name: foo parent_policy: foo_policy rule_list: rule-list1 provider: password: secret server: lb.mydomain.com user: admin delegate_to: localhost ''' RETURN = r''' name: description: Name of the rule. returned: changed type: str sample: FooRule parent_policy: description: The policy which contains the rule to be managed. returned: changed type: str sample: FooPolicy parent_rule_list: description: The rule list which contains the rule to be managed. returned: changed type: str sample: FooRuleList action: description: The action for the firewall rule. returned: changed type: str sample: drop status: description: The activity state of the rule or rule list. returned: changed type: str sample: scheduled schedule: description: The schedule for the firewall rule. returned: changed type: str sample: Foo_schedule description: description: The rule description. returned: changed type: str sample: MyRule irule: description: The iRule that is applied to the firewall rule. returned: changed type: str sample: _sys_auth_radius protocol: description: The protocol to which the rule applies. 
returned: changed type: str sample: any source: description: The packet sources to which the rule applies returned: changed type: complex contains: address: description: A specific IP address. returned: changed type: str sample: 192.168.1.1 address_list: description: An existing address list. returned: changed type: str sample: foo-list1 address_range: description: The address range. returned: changed type: str sample: 1.1.1.1-2.2.2.2 country: description: A country code. returned: changed type: str sample: US port: description: Single numeric port. returned: changed type: int sample: 8080 port_list: description: An existing port list. returned: changed type: str sample: port-list1 port_range: description: The port range. returned: changed type: str sample: 80-443 vlan: description: Source VLANs for the packets. returned: changed type: str sample: vlan1 sample: hash/dictionary of values destination: description: The packet destinations to which the rule applies. returned: changed type: complex contains: address: description: A specific IP address. returned: changed type: str sample: 192.168.1.1 address_list: description: An existing address list. returned: changed type: str sample: foo-list1 address_range: description: The address range. returned: changed type: str sample: 1.1.1.1-2.2.2.2 country: description: A country code. returned: changed type: str sample: US port: description: Single numeric port. returned: changed type: int sample: 8080 port_list: description: An existing port list. returned: changed type: str sample: port-list1 port_range: description: The port range. returned: changed type: str sample: 80-443 sample: hash/dictionary of values logging: description: Enable or Disable logging for the firewall rule. returned: changed type: bool sample: yes rule_list: description: An existing rule list to use in the parent policy. returned: changed type: str sample: rule-list-1 icmp_message: description: The (ICMP) or ICMPv6 message C(type) and C(code) that the rule uses. returned: changed type: complex contains: type: description: The type of ICMP message. returned: changed type: str sample: 0 code: description: The code returned in response to the specified ICMP message type. 
returned: changed type: str sample: 1 sample: hash/dictionary of values ''' from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.basic import env_fallback try: from library.module_utils.network.f5.bigip import F5RestClient from library.module_utils.network.f5.common import F5ModuleError from library.module_utils.network.f5.common import AnsibleF5Parameters from library.module_utils.network.f5.common import fq_name from library.module_utils.network.f5.common import f5_argument_spec from library.module_utils.network.f5.common import transform_name except ImportError: from ansible.module_utils.network.f5.bigip import F5RestClient from ansible.module_utils.network.f5.common import F5ModuleError from ansible.module_utils.network.f5.common import AnsibleF5Parameters from ansible.module_utils.network.f5.common import fq_name from ansible.module_utils.network.f5.common import f5_argument_spec from ansible.module_utils.network.f5.common import transform_name class Parameters(AnsibleF5Parameters): api_map = { 'ipProtocol': 'protocol', 'log': 'logging', 'icmp': 'icmp_message', 'ruleList': 'rule_list' } api_attributes = [ 'irule', 'ipProtocol', 'log', 'schedule', 'status', 'destination', 'source', 'icmp', 'action', 'description', 'ruleList', ] returnables = [ 'logging', 'protocol', 'irule', 'source', 'destination', 'action', 'status', 'schedule', 'description', 'icmp_message', 'rule_list', ] updatables = [ 'logging', 'protocol', 'irule', 'source', 'destination', 'action', 'status', 'schedule', 'description', 'icmp_message', 'rule_list', ] protocol_map = { '1': 'icmp', '6': 'tcp', '17': 'udp', '58': 'icmpv6', '255': 'any', } class ApiParameters(Parameters): @property def logging(self): if self._values['logging'] is None: return None if self._values['logging'] == 'yes': return True return False @property def protocol(self): if self._values['protocol'] is None: return None if self._values['protocol'] in self.protocol_map: return self.protocol_map[self._values['protocol']] return self._values['protocol'] @property def source(self): result = [] if self._values['source'] is None: return None v = self._values['source'] if 'addressLists' in v: result += [('address_list', x) for x in v['addressLists']] if 'vlans' in v: result += [('vlan', x) for x in v['vlans']] if 'geo' in v: result += [('geo', x['name']) for x in v['geo']] if 'addresses' in v: result += [('address', x['name']) for x in v['addresses']] if 'ports' in v: result += [('port', str(x['name'])) for x in v['ports']] if 'portLists' in v: result += [('port_list', x) for x in v['portLists']] if result: return result return None @property def destination(self): result = [] if self._values['destination'] is None: return None v = self._values['destination'] if 'addressLists' in v: result += [('address_list', x) for x in v['addressLists']] if 'geo' in v: result += [('geo', x['name']) for x in v['geo']] if 'addresses' in v: result += [('address', x['name']) for x in v['addresses']] if 'ports' in v: result += [('port', x['name']) for x in v['ports']] if 'portLists' in v: result += [('port_list', x) for x in v['portLists']] if result: return result return None @property def icmp_message(self): if self._values['icmp_message'] is None: return None result = [x['name'] for x in self._values['icmp_message']] return result class ModuleParameters(Parameters): @property def irule(self): if self._values['irule'] is None: return None if self._values['irule'] == '': return '' return fq_name(self.partition, self._values['irule']) @property def 
description(self): if self._values['description'] is None: return None if self._values['description'] == '': return '' return self._values['description'] @property def schedule(self): if self._values['schedule'] is None: return None if self._values['schedule'] == '': return '' return fq_name(self.partition, self._values['schedule']) @property def source(self): result = [] if self._values['source'] is None: return None for x in self._values['source']: if 'address' in x and x['address'] is not None: result += [('address', x['address'])] elif 'address_range' in x and x['address_range'] is not None: result += [('address', x['address_range'])] elif 'address_list' in x and x['address_list'] is not None: result += [('address_list', x['address_list'])] elif 'country' in x and x['country'] is not None: result += [('geo', x['country'])] elif 'vlan' in x and x['vlan'] is not None: result += [('vlan', fq_name(self.partition, x['vlan']))] elif 'port' in x and x['port'] is not None: result += [('port', str(x['port']))] elif 'port_range' in x and x['port_range'] is not None: result += [('port', x['port_range'])] elif 'port_list' in x and x['port_list'] is not None: result += [('port_list', fq_name(self.partition, x['port_list']))] if result: return result return None @property def destination(self): result = [] if self._values['destination'] is None: return None for x in self._values['destination']: if 'address' in x and x['address'] is not None: result += [('address', x['address'])] elif 'address_range' in x and x['address_range'] is not None: result += [('address', x['address_range'])] elif 'address_list' in x and x['address_list'] is not None: result += [('address_list', x['address_list'])] elif 'country' in x and x['country'] is not None: result += [('geo', x['country'])] elif 'port' in x and x['port'] is not None: result += [('port', str(x['port']))] elif 'port_range' in x and x['port_range'] is not None: result += [('port', x['port_range'])] elif 'port_list' in x and x['port_list'] is not None: result += [('port_list', fq_name(self.partition, x['port_list']))] if result: return result return None @property def icmp_message(self): if self._values['icmp_message'] is None: return None result = [] for x in self._values['icmp_message']: type = x.get('type', '255') code = x.get('code', '255') if type is None or type == 'any': type = '255' if code is None or code == 'any': code = '255' if type == '255' and code == '255': result.append("255") elif type == '255' and code != '255': raise F5ModuleError( "A type of 'any' (255) requires a code of 'any'." 
) elif code == '255': result.append(type) else: result.append('{0}:{1}'.format(type, code)) result = list(set(result)) return result @property def rule_list(self): if self._values['rule_list'] is None: return None if self._values['parent_policy'] is not None: return fq_name(self.partition, self._values['rule_list']) return None class Changes(Parameters): def to_return(self): result = {} try: for returnable in self.returnables: result[returnable] = getattr(self, returnable) result = self._filter_params(result) except Exception: pass return result class UsableChanges(Changes): @property def logging(self): if self._values['logging'] is None: return None if self._values['logging'] is True: return "yes" return "no" @property def source(self): if self._values['source'] is None: return None result = dict( addresses=[], addressLists=[], vlans=[], geo=[], ports=[], portLists=[] ) for x in self._values['source']: if x[0] == 'address': result['addresses'].append({'name': x[1]}) elif x[0] == 'address_list': result['addressLists'].append(x[1]) elif x[0] == 'vlan': result['vlans'].append(x[1]) elif x[0] == 'geo': result['geo'].append({'name': x[1]}) elif x[0] == 'port': result['ports'].append({'name': str(x[1])}) elif x[0] == 'port_list': result['portLists'].append(x[1]) return result @property def destination(self): if self._values['destination'] is None: return None result = dict( addresses=[], addressLists=[], vlans=[], geo=[], ports=[], portLists=[] ) for x in self._values['destination']: if x[0] == 'address': result['addresses'].append({'name': x[1]}) elif x[0] == 'address_list': result['addressLists'].append(x[1]) elif x[0] == 'geo': result['geo'].append({'name': x[1]}) elif x[0] == 'port': result['ports'].append({'name': str(x[1])}) elif x[0] == 'port_list': result['portLists'].append(x[1]) return result @property def icmp_message(self): if self._values['icmp_message'] is None: return None result = [] for x in self._values['icmp_message']: result.append({'name': x}) return result class ReportableChanges(Changes): @property def source(self): if self._values['source'] is None: return None result = [] v = self._values['source'] if v['addressLists']: result += [('address_list', x) for x in v['addressLists']] if v['vlans']: result += [('vlan', x) for x in v['vlans']] if v['geo']: result += [('geo', x['name']) for x in v['geo']] if v['addresses']: result += [('address', x['name']) for x in v['addresses']] if v['ports']: result += [('port', str(x)) for x in v['ports']] if v['portLists']: result += [('port_list', x['name']) for x in v['portLists']] if result: return dict(result) return None @property def destination(self): if self._values['destination'] is None: return None result = [] v = self._values['destination'] if v['addressLists']: result += [('address_list', x) for x in v['addressLists']] if v['geo']: result += [('geo', x['name']) for x in v['geo']] if v['addresses']: result += [('address', x['name']) for x in v['addresses']] if v['ports']: result += [('port', str(x)) for x in v['ports']] if v['portLists']: result += [('port_list', x['name']) for x in v['portLists']] if result: return dict(result) return None class Difference(object): def __init__(self, want, have=None): self.want = want self.have = have def compare(self, param): try: result = getattr(self, param) return result except AttributeError: return self.__default(param) def __default(self, param): attr1 = getattr(self.want, param) try: attr2 = getattr(self.have, param) if attr1 != attr2: return attr1 except AttributeError: return attr1 
@property def irule(self): if self.want.irule is None: return None if self.have.irule is None and self.want.irule == '': return None if self.have.irule is None: return self.want.irule if self.want.irule != self.have.irule: return self.want.irule @property def description(self): if self.want.description is None: return None if self.have.description is None and self.want.description == '': return None if self.have.description is None: return self.want.description if self.want.description != self.have.description: return self.want.description @property def source(self): if self.want.source is None: return None if self.want.source is None and self.have.source is None: return None if self.have.source is None: return self.want.source if set(self.want.source) != set(self.have.source): return self.want.source @property def destination(self): if self.want.destination is None: return None if self.want.destination is None and self.have.destination is None: return None if self.have.destination is None: return self.want.destination if set(self.want.destination) != set(self.have.destination): return self.want.destination @property def icmp_message(self): if self.want.icmp_message is None: return None if self.want.icmp_message is None and self.have.icmp_message is None: return None if self.have.icmp_message is None: return self.want.icmp_message if set(self.want.icmp_message) != set(self.have.icmp_message): return self.want.icmp_message class ModuleManager(object): def __init__(self, *args, **kwargs): self.module = kwargs.get('module', None) self.client = F5RestClient(**self.module.params) self.want = ModuleParameters(params=self.module.params) self.have = ApiParameters() self.changes = UsableChanges() def _set_changed_options(self): changed = {} for key in Parameters.returnables: if getattr(self.want, key) is not None: changed[key] = getattr(self.want, key) if changed: self.changes = UsableChanges(params=changed) def _update_changed_options(self): diff = Difference(self.want, self.have) updatables = Parameters.updatables changed = dict() for k in updatables: change = diff.compare(k) if change is None: continue else: if isinstance(change, dict): changed.update(change) else: changed[k] = change if changed: self.changes = UsableChanges(params=changed) return True return False def should_update(self): result = self._update_changed_options() if result: return True return False def exec_module(self): changed = False result = dict() state = self.want.state if state == "present": changed = self.present() elif state == "absent": changed = self.absent() reportable = ReportableChanges(params=self.changes.to_return()) changes = reportable.to_return() result.update(**changes) result.update(dict(changed=changed)) self._announce_deprecations(result) return result def _announce_deprecations(self, result): warnings = result.pop('__warnings', []) for warning in warnings: self.client.module.deprecate( msg=warning['msg'], version=warning['version'] ) def present(self): if self.exists(): return self.update() else: return self.create() def exists(self): name = self.want.name if self.want.parent_policy: uri = "https://{0}:{1}/mgmt/tm/security/firewall/policy/{2}/rules/{3}".format( self.client.provider['server'], self.client.provider['server_port'], transform_name(self.want.partition, self.want.parent_policy), name.replace('/', '_') ) else: uri = "https://{0}:{1}/mgmt/tm/security/firewall/rule-list/{2}/rules/{3}".format( self.client.provider['server'], self.client.provider['server_port'], transform_name(self.want.partition, 
self.want.parent_rule_list), name.replace('/', '_') ) resp = self.client.api.get(uri) if resp.ok: return True return False def update(self): self.have = self.read_current_from_device() if not self.should_update(): return False if self.module.check_mode: return True self.update_on_device() return True def remove(self): if self.module.check_mode: return True self.remove_from_device() if self.exists(): raise F5ModuleError("Failed to delete the resource.") return True def create(self): self._set_changed_options() self.set_reasonable_creation_defaults() if self.want.status == 'scheduled' and self.want.schedule is None: raise F5ModuleError( "A 'schedule' must be specified when 'status' is 'scheduled'." ) if self.module.check_mode: return True self.create_on_device() return True def set_reasonable_creation_defaults(self): if self.want.action is None: self.changes.update({'action': 'reject'}) if self.want.logging is None: self.changes.update({'logging': False}) if self.want.status is None: self.changes.update({'status': 'enabled'}) def create_on_device(self): params = self.changes.api_params() name = self.want.name params['name'] = name.replace('/', '_') params['partition'] = self.want.partition params['placeAfter'] = 'last' if self.want.parent_policy: uri = "https://{0}:{1}/mgmt/tm/security/firewall/policy/{2}/rules/".format( self.client.provider['server'], self.client.provider['server_port'], transform_name(self.want.partition, self.want.parent_policy), ) else: uri = "https://{0}:{1}/mgmt/tm/security/firewall/rule-list/{2}/rules/".format( self.client.provider['server'], self.client.provider['server_port'], transform_name(self.want.partition, self.want.parent_rule_list), ) if self.changes.protocol not in ['icmp', 'icmpv6']: if self.changes.icmp_message is not None: raise F5ModuleError( "The 'icmp_message' can only be specified when 'protocol' is 'icmp' or 'icmpv6'." ) resp = self.client.api.post(uri, json=params) try: response = resp.json() except ValueError as ex: raise F5ModuleError(str(ex)) if 'code' in response and response['code'] in [400, 403, 404]: if 'message' in response: raise F5ModuleError(response['message']) else: raise F5ModuleError(resp.content) def update_on_device(self): name = self.want.name if self.want.parent_policy and self.want.rule_list: uri = "https://{0}:{1}/mgmt/tm/security/firewall/policy/{2}/rules/{3}".format( self.client.provider['server'], self.client.provider['server_port'], transform_name(self.want.partition, self.want.parent_policy), name.replace('/', '_') ) elif self.want.parent_policy: uri = "https://{0}:{1}/mgmt/tm/security/firewall/policy/{2}/rules/{3}".format( self.client.provider['server'], self.client.provider['server_port'], transform_name(self.want.partition, self.want.parent_policy), name.replace('/', '_') ) else: uri = "https://{0}:{1}/mgmt/tm/security/firewall/rule-list/{2}/rules/{3}".format( self.client.provider['server'], self.client.provider['server_port'], transform_name(self.want.partition, self.want.parent_rule_list), name.replace('/', '_') ) if self.have.protocol not in ['icmp', 'icmpv6'] and self.changes.protocol not in ['icmp', 'icmpv6']: if self.changes.icmp_message is not None: raise F5ModuleError( "The 'icmp_message' can only be specified when 'protocol' is 'icmp' or 'icmpv6'." 
) if self.changes.protocol in ['icmp', 'icmpv6']: self.changes.update({'source': {}}) self.changes.update({'destination': {}}) params = self.changes.api_params() resp = self.client.api.patch(uri, json=params) try: response = resp.json() except ValueError as ex: raise F5ModuleError(str(ex)) if 'code' in response and response['code'] in [400, 403, 404]: if 'message' in response: raise F5ModuleError(response['message']) else: raise F5ModuleError(resp.content) def absent(self): if self.exists(): return self.remove() return False def remove_from_device(self): name = self.want.name if self.want.parent_policy: uri = "https://{0}:{1}/mgmt/tm/security/firewall/policy/{2}/rules/{3}".format( self.client.provider['server'], self.client.provider['server_port'], transform_name(self.want.partition, self.want.parent_policy), name.replace('/', '_') ) else: uri = "https://{0}:{1}/mgmt/tm/security/firewall/rule-list/{2}/rules/{3}".format( self.client.provider['server'], self.client.provider['server_port'], transform_name(self.want.partition, self.want.parent_rule_list), name.replace('/', '_') ) resp = self.client.api.delete(uri) if resp.status == 200: return True def read_current_from_device(self): if self.want.parent_policy: uri = "https://{0}:{1}/mgmt/tm/security/firewall/policy/{2}/rules/{3}".format( self.client.provider['server'], self.client.provider['server_port'], transform_name(self.want.partition, self.want.parent_policy), self.want.name ) else: uri = "https://{0}:{1}/mgmt/tm/security/firewall/rule-list/{2}/rules/{3}".format( self.client.provider['server'], self.client.provider['server_port'], transform_name(self.want.partition, self.want.parent_rule_list), self.want.name ) resp = self.client.api.get(uri) try: response = resp.json() except ValueError as ex: raise F5ModuleError(str(ex)) if 'code' in response and response['code'] == 400: if 'message' in response: raise F5ModuleError(response['message']) else: raise F5ModuleError(resp.content) return ApiParameters(params=response) class ArgumentSpec(object): def __init__(self): self.supports_check_mode = True argument_spec = dict( name=dict(required=True), parent_policy=dict(), parent_rule_list=dict(), logging=dict(type='bool'), protocol=dict(), irule=dict(), description=dict(), source=dict( type='list', elements='dict', options=dict( address=dict(), address_list=dict(), address_range=dict(), country=dict(), port=dict(type='int'), port_list=dict(), port_range=dict(), vlan=dict(), ), mutually_exclusive=[[ 'address', 'address_list', 'address_range', 'country', 'vlan', 'port', 'port_range', 'port_list' ]] ), destination=dict( type='list', elements='dict', options=dict( address=dict(), address_list=dict(), address_range=dict(), country=dict(), port=dict(type='int'), port_list=dict(), port_range=dict(), ), mutually_exclusive=[[ 'address', 'address_list', 'address_range', 'country', 'port', 'port_range', 'port_list' ]] ), action=dict( choices=['accept', 'drop', 'reject', 'accept-decisively'] ), status=dict( choices=['enabled', 'disabled', 'scheduled'] ), schedule=dict(), rule_list=dict(), icmp_message=dict( type='list', elements='dict', options=dict( type=dict(), code=dict(), ) ), partition=dict( default='Common', fallback=(env_fallback, ['F5_PARTITION']) ), state=dict( default='present', choices=['present', 'absent'] ) ) self.argument_spec = {} self.argument_spec.update(f5_argument_spec) self.argument_spec.update(argument_spec) self.mutually_exclusive = [ ['rule_list', 'action'], ['rule_list', 'source'], ['rule_list', 'destination'], ['rule_list', 'irule'], 
['rule_list', 'protocol'], ['rule_list', 'logging'], ['parent_policy', 'parent_rule_list'] ] self.required_one_of = [ ['parent_policy', 'parent_rule_list'] ] def main(): spec = ArgumentSpec() module = AnsibleModule( argument_spec=spec.argument_spec, supports_check_mode=spec.supports_check_mode, mutually_exclusive=spec.mutually_exclusive, required_one_of=spec.required_one_of ) try: mm = ModuleManager(module=module) results = mm.exec_module() module.exit_json(**results) except F5ModuleError as ex: module.fail_json(msg=str(ex)) if __name__ == '__main__': main()
mit
mrquim/repository.mrquim
repo/script.module.youtube.dl/lib/youtube_dl/extractor/teletask.py
215
1739
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import unified_strdate


class TeleTaskIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?tele-task\.de/archive/video/html5/(?P<id>[0-9]+)'
    _TEST = {
        'url': 'http://www.tele-task.de/archive/video/html5/26168/',
        'info_dict': {
            'id': '26168',
            'title': 'Duplicate Detection',
        },
        'playlist': [{
            'md5': '290ef69fb2792e481169c3958dbfbd57',
            'info_dict': {
                'id': '26168-speaker',
                'ext': 'mp4',
                'title': 'Duplicate Detection',
                'upload_date': '20141218',
            }
        }, {
            'md5': 'e1e7218c5f0e4790015a437fcf6c71b4',
            'info_dict': {
                'id': '26168-slides',
                'ext': 'mp4',
                'title': 'Duplicate Detection',
                'upload_date': '20141218',
            }
        }]
    }

    def _real_extract(self, url):
        lecture_id = self._match_id(url)
        webpage = self._download_webpage(url, lecture_id)

        title = self._html_search_regex(
            r'itemprop="name">([^<]+)</a>', webpage, 'title')
        upload_date = unified_strdate(self._html_search_regex(
            r'Date:</td><td>([^<]+)</td>', webpage, 'date', fatal=False))

        entries = [{
            'id': '%s-%s' % (lecture_id, format_id),
            'url': video_url,
            'title': title,
            'upload_date': upload_date,
        } for format_id, video_url in re.findall(
            r'<video class="([^"]+)"[^>]*>\s*<source src="([^"]+)"', webpage)]

        return self.playlist_result(entries, lecture_id, title)
gpl-2.0
highweb-project/highweb-webcl-html5spec
third_party/WebKit/Source/devtools/scripts/generate_supported_css.py
37
2811
#!/usr/bin/env python # Copyright (c) 2014 Google Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. try: import simplejson as json except ImportError: import json import sys def properties_from_file(file_name): properties = [] propertyNames = set() with open(file_name, "r") as f: for line in f: line = line.strip() if not line or line.startswith("//") or "alias_for" in line: continue name = line.partition(" ")[0] entry = {"name": name} propertyNames.add(name) longhands = line.partition("longhands=")[2].partition(",")[0] if longhands: entry["longhands"] = longhands.split(";") properties.append(entry) # Filter out unsupported longhands. for property in properties: if "longhands" not in property: continue longhands = property["longhands"] longhands = [longhand for longhand in longhands if longhand in propertyNames] if not longhands: del property["longhands"] else: property["longhands"] = longhands return properties properties = properties_from_file(sys.argv[1]) with open(sys.argv[2], "w") as f: f.write("WebInspector.CSSMetadata.initializeWithSupportedProperties(%s);" % json.dumps(properties))
bsd-3-clause
pozetroninc/micropython
tests/float/float2int_fp30_intbig.py
30
2629
# check cases converting float to int, relying only on single precision float try: import ustruct as struct except: import struct import sys maxsize_bits = 0 maxsize = sys.maxsize while maxsize: maxsize >>= 1 maxsize_bits += 1 # work out configuration values is_64bit = maxsize_bits > 32 # 0 = none, 1 = long long, 2 = mpz ll_type = None if is_64bit: if maxsize_bits < 63: ll_type = 0 else: if maxsize_bits < 31: ll_type = 0 if ll_type is None: one = 1 if one << 65 < one << 62: ll_type = 1 else: ll_type = 2 # basic conversion print(int(14187744.)) print("%d" % 14187744.) if ll_type == 2: print(int(2.**100)) print("%d" % 2.**100) testpass = True p2_rng = ((30,63,127),(62,63,127))[is_64bit][ll_type] for i in range(0,p2_rng): bitcnt = len(bin(int(2.**i))) - 3; if i != bitcnt: print('fail: 2.**%u was %u bits long' % (i, bitcnt)); testpass = False print("power of 2 test: %s" % (testpass and 'passed' or 'failed')) # TODO why does 10**12 fail this test for single precision float? testpass = True p10_rng = 9 for i in range(0,p10_rng): digcnt = len(str(int(10.**i))) - 1; if i != digcnt: print('fail: 10.**%u was %u digits long' % (i, digcnt)); testpass = False print("power of 10 test: %s" % (testpass and 'passed' or 'failed')) def fp2int_test(num, name, should_fail): try: x = int(num) passed = ~should_fail except: passed = should_fail print('%s: %s' % (name, passed and 'passed' or 'failed')) if ll_type != 2: if ll_type == 0: if is_64bit: neg_bad_fp = -1.00000005*2.**62. pos_bad_fp = 2.**62. neg_good_fp = -2.**62. pos_good_fp = 0.99999993*2.**62. else: neg_bad_fp = -1.00000005*2.**30. pos_bad_fp = 2.**30. neg_good_fp = -2.**30. pos_good_fp = 0.9999999499*2.**30. else: neg_bad_fp = -0.51*2.**64. pos_bad_fp = 2.**63. neg_good_fp = -2.**63. pos_good_fp = 1.9999998*2.**62. fp2int_test(neg_bad_fp, 'neg bad', True) fp2int_test(pos_bad_fp, 'pos bad', True) fp2int_test(neg_good_fp, 'neg good', False) fp2int_test(pos_good_fp, 'pos good', False) else: fp2int_test(-1.999999879*2.**126., 'large neg', False) fp2int_test(1.999999879*2.**126., 'large pos', False) fp2int_test(float('inf'), 'inf test', True) fp2int_test(float('nan'), 'NaN test', True) # test numbers < 1 (this used to fail; see issue #1044) fp2int_test(0.0001, 'small num', False) struct.pack('I', int(1/2))
mit
koltegirish/Arduino
arduino-core/src/processing/app/i18n/python/requests/packages/charade/langhebrewmodel.py
168
11343
######################## BEGIN LICENSE BLOCK ######################## # The Original Code is Mozilla Universal charset detector code. # # The Initial Developer of the Original Code is # Simon Montagu # Portions created by the Initial Developer are Copyright (C) 2005 # the Initial Developer. All Rights Reserved. # # Contributor(s): # Mark Pilgrim - port to Python # Shy Shalom - original C code # Shoshannah Forbes - original C code (?) # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA # 02110-1301 USA ######################### END LICENSE BLOCK ######################### from . import constants # 255: Control characters that usually does not exist in any text # 254: Carriage/Return # 253: symbol (punctuation) that does not belong to word # 252: 0 - 9 # Windows-1255 language model # Character Mapping Table: win1255_CharToOrderMap = ( 255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10 253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20 252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30 253, 69, 91, 79, 80, 92, 89, 97, 90, 68,111,112, 82, 73, 95, 85, # 40 78,121, 86, 71, 67,102,107, 84,114,103,115,253,253,253,253,253, # 50 253, 50, 74, 60, 61, 42, 76, 70, 64, 53,105, 93, 56, 65, 54, 49, # 60 66,110, 51, 43, 44, 63, 81, 77, 98, 75,108,253,253,253,253,253, # 70 124,202,203,204,205, 40, 58,206,207,208,209,210,211,212,213,214, 215, 83, 52, 47, 46, 72, 32, 94,216,113,217,109,218,219,220,221, 34,116,222,118,100,223,224,117,119,104,125,225,226, 87, 99,227, 106,122,123,228, 55,229,230,101,231,232,120,233, 48, 39, 57,234, 30, 59, 41, 88, 33, 37, 36, 31, 29, 35,235, 62, 28,236,126,237, 238, 38, 45,239,240,241,242,243,127,244,245,246,247,248,249,250, 9, 8, 20, 16, 3, 2, 24, 14, 22, 1, 25, 15, 4, 11, 6, 23, 12, 19, 13, 26, 18, 27, 21, 17, 7, 10, 5,251,252,128, 96,253, ) # Model Table: # total sequences: 100% # first 512 sequences: 98.4004% # first 1024 sequences: 1.5981% # rest sequences: 0.087% # negative sequences: 0.0015% HebrewLangModel = ( 0,3,3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,2,3,2,1,2,0,1,0,0, 3,0,3,1,0,0,1,3,2,0,1,1,2,0,2,2,2,1,1,1,1,2,1,1,1,2,0,0,2,2,0,1, 3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,2, 1,2,1,2,1,2,0,0,2,0,0,0,0,0,1,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0, 3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2, 1,2,1,3,1,1,0,0,2,0,0,0,1,0,1,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0, 3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,1,0,1,2,2,1,3, 1,2,1,1,2,2,0,0,2,2,0,0,0,0,1,0,1,0,0,0,1,0,0,0,0,0,0,1,0,1,1,0, 3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,2,2,2,2,3,2, 1,2,1,2,2,2,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0, 3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,2,3,2,2,3,2,2,2,1,2,2,2,2, 1,2,1,1,2,2,0,1,2,0,0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0, 
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,0,2,2,2,2,2, 0,2,0,2,2,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0, 3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,0,2,2,2, 0,2,1,2,2,2,0,0,2,1,0,0,0,0,1,0,1,0,0,0,0,0,0,2,0,0,0,0,0,0,1,0, 3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,3,2,1,2,3,2,2,2, 1,2,1,2,2,2,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,1,0, 3,3,3,3,3,3,3,3,3,2,3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,3,1,0,2,0,2, 0,2,1,2,2,2,0,0,1,2,0,0,0,0,1,0,1,0,0,0,0,0,0,1,0,0,0,2,0,0,1,0, 3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,2,3,2,2,3,2,1,2,1,1,1, 0,1,1,1,1,1,3,0,1,0,0,0,0,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0, 3,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,0,1,1,0,0,1,0,0,1,0,0,0,0, 0,0,1,0,0,0,0,0,2,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,2,2,2,2, 0,2,0,1,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0, 3,3,3,3,3,3,3,3,3,2,3,3,3,2,1,2,3,3,2,3,3,3,3,2,3,2,1,2,0,2,1,2, 0,2,0,2,2,2,0,0,1,2,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,1,0,0,1,0, 3,3,3,3,3,3,3,3,3,2,3,3,3,1,2,2,3,3,2,3,2,3,2,2,3,1,2,2,0,2,2,2, 0,2,1,2,2,2,0,0,1,2,0,0,0,0,1,0,0,0,0,0,1,0,0,1,0,0,0,1,0,0,1,0, 3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,2,3,3,2,2,2,3,3,3,3,1,3,2,2,2, 0,2,0,1,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0, 3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,3,3,3,2,3,2,2,2,1,2,2,0,2,2,2,2, 0,2,0,2,2,2,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0, 3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,1,3,2,3,3,2,3,3,2,2,1,2,2,2,2,2,2, 0,2,1,2,1,2,0,0,1,0,0,0,0,0,1,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,1,0, 3,3,3,3,3,3,2,3,2,3,3,2,3,3,3,3,2,3,2,3,3,3,3,3,2,2,2,2,2,2,2,1, 0,2,0,1,2,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0, 3,3,3,3,3,3,3,3,3,2,1,2,3,3,3,3,3,3,3,2,3,2,3,2,1,2,3,0,2,1,2,2, 0,2,1,1,2,1,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,2,0, 3,3,3,3,3,3,3,3,3,2,3,3,3,3,2,1,3,1,2,2,2,1,2,3,3,1,2,1,2,2,2,2, 0,1,1,1,1,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0, 3,3,3,3,3,3,3,3,3,3,0,2,3,3,3,1,3,3,3,1,2,2,2,2,1,1,2,2,2,2,2,2, 0,2,0,1,1,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0, 3,3,3,3,3,3,2,3,3,3,2,2,3,3,3,2,1,2,3,2,3,2,2,2,2,1,2,1,1,1,2,2, 0,2,1,1,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0, 3,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,0,0,0,1,0,0,0,0,0, 1,0,1,0,0,0,0,0,2,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,3,3,3,3,2,3,3,2,3,1,2,2,2,2,3,2,3,1,1,2,2,1,2,2,1,1,0,2,2,2,2, 0,1,0,1,2,2,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0, 3,0,0,1,1,0,1,0,0,1,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,2,2,0, 0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,0,1,0,1,0,1,1,0,1,1,0,0,0,1,1,0,1,1,1,0,0,0,0,0,0,1,0,0,0,0,0, 0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,0,0,0,1,1,0,1,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0, 3,2,2,1,2,2,2,2,2,2,2,1,2,2,1,2,2,1,1,1,1,1,1,1,1,2,1,1,0,3,3,3, 0,3,0,2,2,2,2,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0, 2,2,2,3,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,2,2,1,2,2,2,1,1,1,2,0,1, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 2,2,2,2,2,2,2,2,2,2,2,1,2,2,2,2,2,2,2,2,2,2,2,0,2,2,0,0,0,0,0,0, 0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 2,3,1,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,2,1,0,2,1,0, 0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,1,1,1,1,1,1,1,1,1,1,0,0,1,1,1,1,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0, 
0,3,1,1,2,2,2,2,2,1,2,2,2,1,1,2,2,2,2,2,2,2,1,2,2,1,0,1,1,1,1,0, 0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,2,1,1,1,1,2,1,1,2,1,0,1,1,1,1,1,1,1,1,1,1,1,0,1,0,0,0,0,0,0,0, 0,0,2,0,0,0,0,0,0,0,0,1,1,0,0,0,0,1,1,0,0,1,1,0,0,0,0,0,0,1,0,0, 2,1,1,2,2,2,2,2,2,2,2,2,2,2,1,2,2,2,2,2,1,2,1,2,1,1,1,1,0,0,0,0, 0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 1,2,1,2,2,2,2,2,2,2,2,2,2,1,2,1,2,1,1,2,1,1,1,2,1,2,1,2,0,1,0,1, 0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,3,1,2,2,2,1,2,2,2,2,2,2,2,2,1,2,1,1,1,1,1,1,2,1,2,1,1,0,1,0,1, 0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 2,1,2,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,2,2, 0,2,0,1,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0, 3,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 2,1,1,1,1,1,1,1,0,1,1,0,1,0,0,1,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,2,0,1,1,1,0,1,0,0,0,1,1,0,1,1,0,0,0,0,0,1,1,0,0, 0,1,1,1,2,1,2,2,2,0,2,0,2,0,1,1,2,1,1,1,1,2,1,0,1,1,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0, 1,0,1,0,0,0,0,0,1,0,1,2,2,0,1,0,0,1,1,2,2,1,2,0,2,0,0,0,1,2,0,1, 2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,2,0,2,1,2,0,2,0,0,1,1,1,1,1,1,0,1,0,0,0,1,0,0,1, 2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,1,0,0,0,0,0,1,0,2,1,1,0,1,0,0,1,1,1,2,2,0,0,1,0,0,0,1,0,0,1, 1,1,2,1,0,1,1,1,0,1,0,1,1,1,1,0,0,0,1,0,1,0,0,0,0,0,0,0,0,2,2,1, 0,2,0,1,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 2,1,0,0,1,0,1,1,1,1,0,0,0,0,0,1,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 1,1,1,1,1,1,1,1,1,2,1,0,1,1,1,1,1,1,1,1,1,1,1,0,1,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,1,1,1,0,0,0,0,1,1,1,0,1,1,0,1,0,0,0,1,1,0,1, 2,0,1,0,1,0,1,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,1,0,1,1,1,0,1,0,0,1,1,2,1,1,2,0,1,0,0,0,1,1,0,1, 1,0,0,1,0,0,1,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,1,0,1,1,2,0,1,0,0,0,0,2,1,1,2,0,2,0,0,0,1,1,0,1, 1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,1,0,2,1,1,0,1,0,0,2,2,1,2,1,1,0,1,0,0,0,1,1,0,1, 2,0,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,1,2,2,0,0,0,0,0,1,1,0,1,0,0,1,0,0,0,0,1,0,1, 1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,1,2,2,0,0,0,0,2,1,1,1,0,2,1,1,0,0,0,2,1,0,1, 1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,1,0,1,1,2,0,1,0,0,1,1,0,2,1,1,0,1,0,0,0,1,1,0,1, 2,2,1,1,1,0,1,1,0,1,1,0,1,0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,1,0,2,1,1,0,1,0,0,1,1,0,1,2,1,0,2,0,0,0,1,1,0,1, 2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0, 0,1,0,0,2,0,2,1,1,0,1,0,1,0,0,1,0,0,0,0,1,0,0,0,1,0,0,0,0,0,1,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 
1,0,0,1,0,0,1,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,1,0,1,1,2,0,1,0,0,1,1,1,0,1,0,0,1,0,0,0,1,0,0,1, 1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 1,0,0,0,0,0,0,0,1,0,1,1,0,0,1,0,0,2,1,1,1,1,1,0,1,0,0,0,0,1,0,1, 0,1,1,1,2,1,1,1,1,0,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,1,2,1,0,0,0,0,0,1,1,1,1,1,0,1,0,0,0,1,1,0,0, ) Win1255HebrewModel = { 'charToOrderMap': win1255_CharToOrderMap, 'precedenceMatrix': HebrewLangModel, 'mTypicalPositiveRatio': 0.984004, 'keepEnglishLetter': False, 'charsetName': "windows-1255" } # flake8: noqa
lgpl-2.1
nurav/balrog
auslib/util/comparison.py
2
1171
import operator
import re

from auslib.util.versions import MozillaVersion

operators = {
    '>=': operator.ge,
    '>': operator.gt,
    '<': operator.lt,
    '<=': operator.le,
}


def get_op(pattern):
    # only alphanumeric characters means no operator
    if re.match('\w+', pattern):
        return operator.eq, pattern
    for op in operators:
        m = re.match('(%s)([\.\w]+)' % op, pattern)
        if m:
            op, operand = m.groups()
            return operators[op], operand


def string_compare(value, compstr):
    """Do a string comparison of a bare string with another, which may carry
    a comparison operator.
    eg string_compare('a', '>b') is False
    """
    opfunc, operand = get_op(compstr)
    return opfunc(value, operand)


def version_compare(value, compstr):
    """Do a version comparison between a string (representing a version),
    with another which may carry a comparison operator.
    A true version comparison is done.
    eg version_compare('1.1', '>1.0') is True
    """
    opfunc, operand = get_op(compstr)
    value = MozillaVersion(value)
    operand = MozillaVersion(operand)
    return opfunc(value, operand)
mpl-2.0
ptomasroos/vitess
test/automation_horizontal_resharding.py
8
5585
#!/usr/bin/env python # # Copyright 2015, Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can # be found in the LICENSE file. """End-to-end test for horizontal resharding automation.""" import environment import utils import worker def setUpModule(): worker.setUpModule() utils.Vtctld().start() def tearDownModule(): worker.tearDownModule() class TestAutomationHorizontalResharding(worker.TestBaseSplitClone): """This test reuses worker.py because worker.py also covers the happy path of the horizontal resharding code. Instead of running the different resharding steps "manually" as part of the test, they will be run by the automation cluster operation. """ KEYSPACE = 'test_keyspace' def test_regular_operation(self): # Use a dedicated worker to run all vtworker commands. worker_proc, _, worker_rpc_port = utils.run_vtworker_bg( ['--cell', 'test_nj'], auto_log=True) vtworker_endpoint = 'localhost:' + str(worker_rpc_port) automation_server_proc, automation_server_port = ( utils.run_automation_server()) source_shard_list = '0' dest_shard_list = '-80,80-' _, vtctld_endpoint = utils.vtctld.rpc_endpoint() utils.run( environment.binary_argstr('automation_client') + ' --server localhost:' + str(automation_server_port) + ' --task HorizontalReshardingTask' + ' --param keyspace=' + self.KEYSPACE + ' --param source_shard_list=' + source_shard_list + ' --param dest_shard_list=' + dest_shard_list + ' --param vtctld_endpoint=' + vtctld_endpoint + ' --param vtworker_endpoint=' + vtworker_endpoint) self.verify() utils.kill_sub_process(automation_server_proc, soft=True) utils.kill_sub_process(worker_proc, soft=True) def verify(self): self.assert_shard_data_equal(0, worker.shard_master, worker.shard_0_tablets.replica) self.assert_shard_data_equal(1, worker.shard_master, worker.shard_1_tablets.replica) # Verify effect of MigrateServedTypes. Dest shards are serving now. utils.check_srv_keyspace('test_nj', self.KEYSPACE, 'Partitions(master): -80 80-\n' + 'Partitions(rdonly): -80 80-\n' + 'Partitions(replica): -80 80-\n') # Check that query service is disabled (source shard) or enabled (dest). # The 'rdonly' tablet requires an explicit healthcheck first because # the following sequence of events is happening in this test: # - SplitDiff returns 'rdonly' as 'spare' tablet (NOT_SERVING) # - MigrateServedTypes runs and does not refresh then 'spare' tablet # (still NOT_SERVING) # Shard_TabletControl.DisableQueryService=true will be set in the topology # - explicit or periodic healthcheck runs: # a) tablet seen as caught up, change type from 'spare' to 'rdonly' # (change to SERVING) # b) post-action callback agent.refreshTablet() reads the topology # and finds out that DisableQueryService=true is set. # (change to NOT_SERVING) # # We must run an explicit healthcheck or we can see one of the two states: # - NOT_SERVING, DisableQueryService=false, tablet type 'spare' # (immediately after SplitDiff returned) # - SERVING, DisableQueryService=false, tablet type 'rdonly' # (during healthcheck before post-action callback is called) utils.run_vtctl(['RunHealthCheck', worker.shard_rdonly1.tablet_alias, 'rdonly'], auto_log=True) # source shard: query service must be disabled after MigrateServedTypes. 
utils.check_tablet_query_service( self, worker.shard_rdonly1, serving=False, tablet_control_disabled=True) utils.check_tablet_query_service( self, worker.shard_replica, serving=False, tablet_control_disabled=True) utils.check_tablet_query_service( self, worker.shard_master, serving=False, tablet_control_disabled=True) # dest shard -80: query service must be enabled after MigrateServedTypes. # Run explicit healthcheck because 'rdonly' tablet may still be 'spare'. utils.run_vtctl(['RunHealthCheck', worker.shard_0_rdonly1.tablet_alias, 'rdonly'], auto_log=True) utils.check_tablet_query_service( self, worker.shard_0_rdonly1, serving=True, tablet_control_disabled=False) utils.check_tablet_query_service( self, worker.shard_0_replica, serving=True, tablet_control_disabled=False) utils.check_tablet_query_service( self, worker.shard_0_master, serving=True, tablet_control_disabled=False) # dest shard 80-: query service must be enabled after MigrateServedTypes. # Run explicit healthcheck because 'rdonly' tablet is still 'spare'. utils.run_vtctl(['RunHealthCheck', worker.shard_1_rdonly1.tablet_alias, 'rdonly'], auto_log=True) utils.check_tablet_query_service( self, worker.shard_1_rdonly1, serving=True, tablet_control_disabled=False) utils.check_tablet_query_service( self, worker.shard_1_replica, serving=True, tablet_control_disabled=False) utils.check_tablet_query_service( self, worker.shard_1_master, serving=True, tablet_control_disabled=False) if __name__ == '__main__': utils.main(test_options=worker.add_test_options)
bsd-3-clause
lscheinkman/nupic
examples/opf/clients/hotgym/anomaly/one_gym/model_params/rec_center_hourly_model_params.py
12
5565
MODEL_PARAMS = \ { 'aggregationInfo': { 'days': 0, 'fields': [], 'hours': 0, 'microseconds': 0, 'milliseconds': 0, 'minutes': 0, 'months': 0, 'seconds': 0, 'weeks': 0, 'years': 0}, 'model': 'HTMPrediction', 'modelParams': { 'anomalyParams': { u'anomalyCacheRecords': None, u'autoDetectThreshold': None, u'autoDetectWaitRecords': None}, 'clParams': { 'alpha': 0.01962508905154251, 'verbosity': 0, 'regionName': 'SDRClassifierRegion', 'steps': '1'}, 'inferenceType': 'TemporalAnomaly', 'sensorParams': { 'encoders': { '_classifierInput': { 'classifierOnly': True, 'clipInput': True, 'fieldname': 'kw_energy_consumption', 'maxval': 53.0, 'minval': 0.0, 'n': 115, 'name': '_classifierInput', 'type': 'ScalarEncoder', 'w': 21}, u'kw_energy_consumption': { 'clipInput': True, 'fieldname': 'kw_energy_consumption', 'maxval': 53.0, 'minval': 0.0, 'n': 29, 'name': 'kw_energy_consumption', 'type': 'ScalarEncoder', 'w': 21}, u'timestamp_dayOfWeek': None, u'timestamp_timeOfDay': { 'fieldname': 'timestamp', 'name': 'timestamp', 'timeOfDay': ( 21, 6.090344152692538), 'type': 'DateEncoder'}, u'timestamp_weekend': { 'fieldname': 'timestamp', 'name': 'timestamp', 'type': 'DateEncoder', 'weekend': ( 21, 1)}}, 'sensorAutoReset': None, 'verbosity': 0}, 'spEnable': True, 'spParams': { 'columnCount': 2048, 'globalInhibition': 1, 'inputWidth': 0, 'boostStrength': 2.0, 'numActiveColumnsPerInhArea': 40, 'potentialPct': 0.8, 'seed': 1956, 'spVerbosity': 0, 'spatialImp': 'cpp', 'synPermActiveInc': 0.05, 'synPermConnected': 0.1, 'synPermInactiveDec': 0.08568228006654939}, 'tmEnable': True, 'tmParams': { 'activationThreshold': 12, 'cellsPerColumn': 32, 'columnCount': 2048, 'globalDecay': 0.0, 'initialPerm': 0.21, 'inputWidth': 2048, 'maxAge': 0, 'maxSegmentsPerCell': 128, 'maxSynapsesPerSegment': 32, 'minThreshold': 10, 'newSynapseCount': 20, 'outputType': 'normal', 'pamLength': 1, 'permanenceDec': 0.1, 'permanenceInc': 0.1, 'seed': 1960, 'temporalImp': 'cpp', 'verbosity': 0}, 'trainSPNetOnlyIfRequested': False}, 'predictAheadTime': None, 'version': 1}
agpl-3.0
macNtoucs/MIT_Moblie
Modules/Campus Map/Utilities/DownloadTiles.py
2
1834
# This python script will download the map tiles from MIT's Map Server # and name them with a column/row convention tile_extents = [ # [13, [2476,3029], [2479,3030]], # [14, [4953,6058], [4959,6060]], [15, [9911,12118], [9915,12121]], [16, [19822,24237], [19830,24243]], [17, [39645,48475],[39659,48487]], [18, [79290,96950],[79318,96974]], [19, [158581,193900], [158635,193946]] ] maxZoom = 17 from urllib2 import Request, urlopen, URLError, HTTPError file_mode = "b" # Used on JavaScript map # url = "http://web.mit.edu/campus-map/tiles/tile_" + str(i) + "_" + str(j) + "_" + str(zoom) + ".png" # 8-Bit PNGs # baseURL = "http://maps.mit.edu/ArcGIS/rest/services/Mobile/WhereIs_Mobile/MapServer/tile/" # 24-Bit PNGs # baseURL = "http://maps.mit.edu/ArcGIS/rest/services/Mobile/WhereIs_Mobile24/MapServer/tile/" # # Both map and Google backing baseURL = "http://maps.mit.edu/ArcGIS/rest/services/Mobile/WhereIs_MobileAll/MapServer/tile/" for tile in tile_extents: zoom = tile[0] start = tile[1] end = tile[2] print 'zoom ', zoom print 'start ', start print 'end ', end for i in range( start[0] , end[0]+1 ): for j in range(start[1] , end[1]+1 ): t = str(zoom)+"/"+str(i)+"/"+str(j) + ".png" global maxZoom; file_name = "MITTile_" + str(100 / (2**(maxZoom - zoom))) + "_"+ str(i - start[0])+ "_"+ str(j - start[1]) + ".png" url = baseURL + str(zoom) + "/" + str(j)+"/"+str(i) req = Request(url) try: f = urlopen(req) print "downloading " + url # Open our local file for writing local_file = open(file_name, "w" + file_mode) #Write to our local file local_file.write(f.read()) local_file.close() #handle errors except HTTPError, e: print "HTTP Error:",e.code , url except URLError, e: print "URL Error:",e.reason , url
lgpl-2.1
weimingtom/python-for-android
python3-alpha/python3-src/Tools/demo/vector.py
110
1452
#!/usr/bin/env python3

"""
A demonstration of classes and their special methods in Python.
"""

class Vec:
    """A simple vector class.

    Instances of the Vec class can be constructed from numbers

    >>> a = Vec(1, 2, 3)
    >>> b = Vec(3, 2, 1)

    added
    >>> a + b
    Vec(4, 4, 4)

    subtracted
    >>> a - b
    Vec(-2, 0, 2)

    and multiplied by a scalar on the left
    >>> 3.0 * a
    Vec(3.0, 6.0, 9.0)

    or on the right
    >>> a * 3.0
    Vec(3.0, 6.0, 9.0)
    """

    def __init__(self, *v):
        self.v = list(v)

    @classmethod
    def fromlist(cls, v):
        if not isinstance(v, list):
            raise TypeError
        inst = cls()
        inst.v = v
        return inst

    def __repr__(self):
        args = ', '.join(repr(x) for x in self.v)
        return 'Vec({})'.format(args)

    def __len__(self):
        return len(self.v)

    def __getitem__(self, i):
        return self.v[i]

    def __add__(self, other):
        # Element-wise addition
        v = [x + y for x, y in zip(self.v, other.v)]
        return Vec.fromlist(v)

    def __sub__(self, other):
        # Element-wise subtraction
        v = [x - y for x, y in zip(self.v, other.v)]
        return Vec.fromlist(v)

    def __mul__(self, scalar):
        # Multiply by scalar
        v = [x * scalar for x in self.v]
        return Vec.fromlist(v)

    __rmul__ = __mul__


def test():
    import doctest
    doctest.testmod()

test()
apache-2.0
sonaht/ansible
lib/ansible/playbook/loop_control.py
66
1328
# (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

from ansible.playbook.attribute import FieldAttribute
from ansible.playbook.base import Base


class LoopControl(Base):

    _loop_var = FieldAttribute(isa='str')
    _label = FieldAttribute(isa='str')
    _pause = FieldAttribute(isa='int')

    def __init__(self):
        super(LoopControl, self).__init__()

    @staticmethod
    def load(data, variable_manager=None, loader=None):
        t = LoopControl()
        return t.load_data(data, variable_manager=variable_manager, loader=loader)
gpl-3.0
tempbottle/rethinkdb
test/interface/shard_balancing.py
25
4831
#!/usr/bin/env python # Copyright 2014 RethinkDB, all rights reserved. """The `interface.shard_balancing` test checks that RethinkDB generates balanced shards in a variety of scenarios.""" from __future__ import print_function import pprint, os, sys, time startTime = time.time() sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, 'common'))) import driver, scenario_common, utils, vcoptparse op = vcoptparse.OptParser() scenario_common.prepare_option_parser_mode_flags(op) _, command_prefix, serve_options = scenario_common.parse_mode_flags(op.parse(sys.argv)) r = utils.import_python_driver() dbName, tableName = utils.get_test_db_table() print("Spinning up two servers (%.2fs)" % (time.time() - startTime)) with driver.Cluster(initial_servers=['a', 'b'], output_folder='.', command_prefix=command_prefix, extra_options=serve_options, wait_until_ready=True) as cluster: cluster.check() print("Establishing ReQL connection (%.2fs)" % (time.time() - startTime)) conn = r.connect(host=cluster[0].host, port=cluster[0].driver_port) conn2 = r.connect(host=cluster[1].host, port=cluster[1].driver_port) print("Creating db if necessary (%.2fs)" % (time.time() - startTime)) if not dbName in r.db_list().run(conn): r.db_create(dbName).run(conn) print("Testing pre-sharding with UUID primary keys (%.2fs)" % (time.time() - startTime)) res = r.db(dbName).table_create("uuid_pkey").run(conn) assert res["tables_created"] == 1 r.db(dbName).table("uuid_pkey").reconfigure(shards=10, replicas=1).run(conn) r.db(dbName).table("uuid_pkey").wait().run(conn) res = r.db(dbName).table("uuid_pkey").insert([{}]*1000).run(conn) assert res["inserted"] == 1000 and res["errors"] == 0 res = r.db(dbName).table("uuid_pkey").info().run(conn)["doc_count_estimates"] pprint.pprint(res) for num in res: assert 50 < num < 200 print("Testing down-sharding existing balanced shards (%.2fs)" % (time.time() - startTime)) r.db(dbName).table("uuid_pkey").reconfigure(shards=2, replicas=1).run(conn) r.db(dbName).table("uuid_pkey").wait().run(conn) res = r.db(dbName).table("uuid_pkey").info().run(conn)["doc_count_estimates"] pprint.pprint(res) for num in res: assert 250 < num < 750 print("Checking shard balancing by reading directly (%.2fs)" % (time.time() - startTime)) direct_counts = [ r.db(dbName).table("uuid_pkey", read_mode="_debug_direct").count().run(conn), r.db(dbName).table("uuid_pkey", read_mode="_debug_direct").count().run(conn2)] pprint.pprint(direct_counts) for num in direct_counts: assert 250 < num < 750 print("Testing sharding of existing inserted data (%.2fs)" % (time.time() - startTime)) res = r.db(dbName).table_create("numeric_pkey").run(conn) assert res["tables_created"] == 1 res = r.db(dbName).table("numeric_pkey").insert([{"id": n} for n in xrange(1000)]).run(conn) assert res["inserted"] == 1000 and res["errors"] == 0 r.db(dbName).table("numeric_pkey").reconfigure(shards=10, replicas=1).run(conn) r.db(dbName).table("numeric_pkey").wait().run(conn) res = r.db(dbName).table("numeric_pkey").info().run(conn)["doc_count_estimates"] pprint.pprint(res) for num in res: assert 50 < num < 200 print("Creating an unbalanced table (%.2fs)" % (time.time() - startTime)) res = r.db(dbName).table_create("unbalanced").run(conn) assert res["tables_created"] == 1 r.db(dbName).table("unbalanced").reconfigure(shards=2, replicas=1).run(conn) r.db(dbName).table("unbalanced").wait().run(conn) res = r.db(dbName).table("unbalanced").insert([{"id": n} for n in xrange(1000)]).run(conn) assert res["inserted"] == 1000 and 
res["errors"] == 0 res = r.db(dbName).table("unbalanced").info().run(conn)["doc_count_estimates"] pprint.pprint(res) assert res[0] > 500 assert res[1] < 100 # If we ever implement #2896, we should make sure the server has an issue now print("Fixing the unbalanced table (%.2fs)" % (time.time() - startTime)) status_before = r.db(dbName).table("unbalanced").status().run(conn) res = r.db(dbName).table("unbalanced").rebalance().run(conn) assert res["rebalanced"] == 1 assert len(res["status_changes"]) == 1 assert res["status_changes"][0]["old_val"] == status_before assert res["status_changes"][0]["new_val"]["status"]["all_replicas_ready"] == False r.db(dbName).table("unbalanced").wait().run(conn) res = r.db(dbName).table("unbalanced").info().run(conn)["doc_count_estimates"] pprint.pprint(res) for num in res: assert 250 < num < 750 print("Cleaning up (%.2fs)" % (time.time() - startTime)) print("Done. (%.2fs)" % (time.time() - startTime))
agpl-3.0
dstftw/youtube-dl
youtube_dl/extractor/pornotube.py
64
3154
from __future__ import unicode_literals import json from .common import InfoExtractor from ..utils import int_or_none class PornotubeIE(InfoExtractor): _VALID_URL = r'https?://(?:\w+\.)?pornotube\.com/(?:[^?#]*?)/video/(?P<id>[0-9]+)' _TEST = { 'url': 'http://www.pornotube.com/orientation/straight/video/4964/title/weird-hot-and-wet-science', 'md5': '60fc5a4f0d93a97968fc7999d98260c9', 'info_dict': { 'id': '4964', 'ext': 'mp4', 'upload_date': '20141203', 'title': 'Weird Hot and Wet Science', 'description': 'md5:a8304bef7ef06cb4ab476ca6029b01b0', 'categories': ['Adult Humor', 'Blondes'], 'uploader': 'Alpha Blue Archives', 'thumbnail': r're:^https?://.*\.jpg$', 'timestamp': 1417582800, 'age_limit': 18, } } def _real_extract(self, url): video_id = self._match_id(url) token = self._download_json( 'https://api.aebn.net/auth/v2/origins/authenticate', video_id, note='Downloading token', data=json.dumps({'credentials': 'Clip Application'}).encode('utf-8'), headers={ 'Content-Type': 'application/json', 'Origin': 'http://www.pornotube.com', })['tokenKey'] video_url = self._download_json( 'https://api.aebn.net/delivery/v1/clips/%s/MP4' % video_id, video_id, note='Downloading delivery information', headers={'Authorization': token})['mediaUrl'] FIELDS = ( 'title', 'description', 'startSecond', 'endSecond', 'publishDate', 'studios{name}', 'categories{name}', 'movieId', 'primaryImageNumber' ) info = self._download_json( 'https://api.aebn.net/content/v2/clips/%s?fields=%s' % (video_id, ','.join(FIELDS)), video_id, note='Downloading metadata', headers={'Authorization': token}) if isinstance(info, list): info = info[0] title = info['title'] timestamp = int_or_none(info.get('publishDate'), scale=1000) uploader = info.get('studios', [{}])[0].get('name') movie_id = info.get('movieId') primary_image_number = info.get('primaryImageNumber') thumbnail = None if movie_id and primary_image_number: thumbnail = 'http://pic.aebn.net/dis/t/%s/%s_%08d.jpg' % ( movie_id, movie_id, primary_image_number) start = int_or_none(info.get('startSecond')) end = int_or_none(info.get('endSecond')) duration = end - start if start and end else None categories = [c['name'] for c in info.get('categories', []) if c.get('name')] return { 'id': video_id, 'url': video_url, 'title': title, 'description': info.get('description'), 'duration': duration, 'timestamp': timestamp, 'uploader': uploader, 'thumbnail': thumbnail, 'categories': categories, 'age_limit': 18, }
unlicense
marcsans/cnn-physics-perception
phy/lib/python2.7/site-packages/sklearn/datasets/olivetti_faces.py
13
4720
"""Modified Olivetti faces dataset. The original database was available from http://www.cl.cam.ac.uk/research/dtg/attarchive/facedatabase.html The version retrieved here comes in MATLAB format from the personal web page of Sam Roweis: http://www.cs.nyu.edu/~roweis/ There are ten different images of each of 40 distinct subjects. For some subjects, the images were taken at different times, varying the lighting, facial expressions (open / closed eyes, smiling / not smiling) and facial details (glasses / no glasses). All the images were taken against a dark homogeneous background with the subjects in an upright, frontal position (with tolerance for some side movement). The original dataset consisted of 92 x 112, while the Roweis version consists of 64x64 images. """ # Copyright (c) 2011 David Warde-Farley <wardefar at iro dot umontreal dot ca> # License: BSD 3 clause from io import BytesIO from os.path import exists from os import makedirs try: # Python 2 import urllib2 urlopen = urllib2.urlopen except ImportError: # Python 3 import urllib.request urlopen = urllib.request.urlopen import numpy as np from scipy.io.matlab import loadmat from .base import get_data_home, Bunch from .base import _pkl_filepath from ..utils import check_random_state from ..externals import joblib DATA_URL = "http://cs.nyu.edu/~roweis/data/olivettifaces.mat" TARGET_FILENAME = "olivetti.pkz" # Grab the module-level docstring to use as a description of the # dataset MODULE_DOCS = __doc__ def fetch_olivetti_faces(data_home=None, shuffle=False, random_state=0, download_if_missing=True): """Loader for the Olivetti faces data-set from AT&T. Read more in the :ref:`User Guide <olivetti_faces>`. Parameters ---------- data_home : optional, default: None Specify another download and cache folder for the datasets. By default all scikit learn data is stored in '~/scikit_learn_data' subfolders. shuffle : boolean, optional If True the order of the dataset is shuffled to avoid having images of the same person grouped. download_if_missing : optional, True by default If False, raise a IOError if the data is not locally available instead of trying to download the data from the source site. random_state : optional, integer or RandomState object The seed or the random number generator used to shuffle the data. Returns ------- An object with the following attributes: data : numpy array of shape (400, 4096) Each row corresponds to a ravelled face image of original size 64 x 64 pixels. images : numpy array of shape (400, 64, 64) Each row is a face image corresponding to one of the 40 subjects of the dataset. target : numpy array of shape (400, ) Labels associated to each face image. Those labels are ranging from 0-39 and correspond to the Subject IDs. DESCR : string Description of the modified Olivetti Faces Dataset. Notes ------ This dataset consists of 10 pictures each of 40 individuals. 
The original database was available from (now defunct) http://www.cl.cam.ac.uk/research/dtg/attarchive/facedatabase.html The version retrieved here comes in MATLAB format from the personal web page of Sam Roweis: http://www.cs.nyu.edu/~roweis/ """ data_home = get_data_home(data_home=data_home) if not exists(data_home): makedirs(data_home) filepath = _pkl_filepath(data_home, TARGET_FILENAME) if not exists(filepath): print('downloading Olivetti faces from %s to %s' % (DATA_URL, data_home)) fhandle = urlopen(DATA_URL) buf = BytesIO(fhandle.read()) mfile = loadmat(buf) faces = mfile['faces'].T.copy() joblib.dump(faces, filepath, compress=6) del mfile else: faces = joblib.load(filepath) # We want floating point data, but float32 is enough (there is only # one byte of precision in the original uint8s anyway) faces = np.float32(faces) faces = faces - faces.min() faces /= faces.max() faces = faces.reshape((400, 64, 64)).transpose(0, 2, 1) # 10 images per class, 400 images total, each class is contiguous. target = np.array([i // 10 for i in range(400)]) if shuffle: random_state = check_random_state(random_state) order = random_state.permutation(len(faces)) faces = faces[order] target = target[order] return Bunch(data=faces.reshape(len(faces), -1), images=faces, target=target, DESCR=MODULE_DOCS)
mit
RO-ny9/python-for-android
python3-alpha/python3-src/Lib/test/test_zipimport.py
55
17221
import sys import os import marshal import imp import struct import time import unittest from test import support from test.test_importhooks import ImportHooksBaseTestCase, test_src, test_co # some tests can be ran even without zlib try: import zlib except ImportError: zlib = None from zipfile import ZipFile, ZipInfo, ZIP_STORED, ZIP_DEFLATED import zipimport import linecache import doctest import inspect import io from traceback import extract_tb, extract_stack, print_tb raise_src = 'def do_raise(): raise TypeError\n' def make_pyc(co, mtime): data = marshal.dumps(co) if type(mtime) is type(0.0): # Mac mtimes need a bit of special casing if mtime < 0x7fffffff: mtime = int(mtime) else: mtime = int(-0x100000000 + int(mtime)) pyc = imp.get_magic() + struct.pack("<i", int(mtime)) + data return pyc def module_path_to_dotted_name(path): return path.replace(os.sep, '.') NOW = time.time() test_pyc = make_pyc(test_co, NOW) TESTMOD = "ziptestmodule" TESTPACK = "ziptestpackage" TESTPACK2 = "ziptestpackage2" TEMP_ZIP = os.path.abspath("junk95142.zip") pyc_file = imp.cache_from_source(TESTMOD + '.py') pyc_ext = ('.pyc' if __debug__ else '.pyo') class UncompressedZipImportTestCase(ImportHooksBaseTestCase): compression = ZIP_STORED def setUp(self): # We're reusing the zip archive path, so we must clear the # cached directory info and linecache linecache.clearcache() zipimport._zip_directory_cache.clear() ImportHooksBaseTestCase.setUp(self) def doTest(self, expected_ext, files, *modules, **kw): z = ZipFile(TEMP_ZIP, "w") try: for name, (mtime, data) in files.items(): zinfo = ZipInfo(name, time.localtime(mtime)) zinfo.compress_type = self.compression z.writestr(zinfo, data) z.close() stuff = kw.get("stuff", None) if stuff is not None: # Prepend 'stuff' to the start of the zipfile with open(TEMP_ZIP, "rb") as f: data = f.read() with open(TEMP_ZIP, "wb") as f: f.write(stuff) f.write(data) sys.path.insert(0, TEMP_ZIP) mod = __import__(".".join(modules), globals(), locals(), ["__dummy__"]) call = kw.get('call') if call is not None: call(mod) if expected_ext: file = mod.get_file() self.assertEqual(file, os.path.join(TEMP_ZIP, *modules) + expected_ext) finally: z.close() os.remove(TEMP_ZIP) def testAFakeZlib(self): # # This could cause a stack overflow before: importing zlib.py # from a compressed archive would cause zlib to be imported # which would find zlib.py in the archive, which would... etc. # # This test *must* be executed first: it must be the first one # to trigger zipimport to import zlib (zipimport caches the # zlib.decompress function object, after which the problem being # tested here wouldn't be a problem anymore... # (Hence the 'A' in the test method name: to make it the first # item in a list sorted by name, like unittest.makeSuite() does.) # # This test fails on platforms on which the zlib module is # statically linked, but the problem it tests for can't # occur in that case (builtin modules are always found first), # so we'll simply skip it then. Bug #765456. 
# if "zlib" in sys.builtin_module_names: return if "zlib" in sys.modules: del sys.modules["zlib"] files = {"zlib.py": (NOW, test_src)} try: self.doTest(".py", files, "zlib") except ImportError: if self.compression != ZIP_DEFLATED: self.fail("expected test to not raise ImportError") else: if self.compression != ZIP_STORED: self.fail("expected test to raise ImportError") def testPy(self): files = {TESTMOD + ".py": (NOW, test_src)} self.doTest(".py", files, TESTMOD) def testPyc(self): files = {TESTMOD + pyc_ext: (NOW, test_pyc)} self.doTest(pyc_ext, files, TESTMOD) def testBoth(self): files = {TESTMOD + ".py": (NOW, test_src), TESTMOD + pyc_ext: (NOW, test_pyc)} self.doTest(pyc_ext, files, TESTMOD) def testEmptyPy(self): files = {TESTMOD + ".py": (NOW, "")} self.doTest(None, files, TESTMOD) def testBadMagic(self): # make pyc magic word invalid, forcing loading from .py badmagic_pyc = bytearray(test_pyc) badmagic_pyc[0] ^= 0x04 # flip an arbitrary bit files = {TESTMOD + ".py": (NOW, test_src), TESTMOD + pyc_ext: (NOW, badmagic_pyc)} self.doTest(".py", files, TESTMOD) def testBadMagic2(self): # make pyc magic word invalid, causing an ImportError badmagic_pyc = bytearray(test_pyc) badmagic_pyc[0] ^= 0x04 # flip an arbitrary bit files = {TESTMOD + pyc_ext: (NOW, badmagic_pyc)} try: self.doTest(".py", files, TESTMOD) except ImportError: pass else: self.fail("expected ImportError; import from bad pyc") def testBadMTime(self): badtime_pyc = bytearray(test_pyc) # flip the second bit -- not the first as that one isn't stored in the # .py's mtime in the zip archive. badtime_pyc[7] ^= 0x02 files = {TESTMOD + ".py": (NOW, test_src), TESTMOD + pyc_ext: (NOW, badtime_pyc)} self.doTest(".py", files, TESTMOD) def testPackage(self): packdir = TESTPACK + os.sep files = {packdir + "__init__" + pyc_ext: (NOW, test_pyc), packdir + TESTMOD + pyc_ext: (NOW, test_pyc)} self.doTest(pyc_ext, files, TESTPACK, TESTMOD) def testDeepPackage(self): packdir = TESTPACK + os.sep packdir2 = packdir + TESTPACK2 + os.sep files = {packdir + "__init__" + pyc_ext: (NOW, test_pyc), packdir2 + "__init__" + pyc_ext: (NOW, test_pyc), packdir2 + TESTMOD + pyc_ext: (NOW, test_pyc)} self.doTest(pyc_ext, files, TESTPACK, TESTPACK2, TESTMOD) def testZipImporterMethods(self): packdir = TESTPACK + os.sep packdir2 = packdir + TESTPACK2 + os.sep files = {packdir + "__init__" + pyc_ext: (NOW, test_pyc), packdir2 + "__init__" + pyc_ext: (NOW, test_pyc), packdir2 + TESTMOD + pyc_ext: (NOW, test_pyc)} z = ZipFile(TEMP_ZIP, "w") try: for name, (mtime, data) in files.items(): zinfo = ZipInfo(name, time.localtime(mtime)) zinfo.compress_type = self.compression z.writestr(zinfo, data) z.close() zi = zipimport.zipimporter(TEMP_ZIP) self.assertEqual(zi.archive, TEMP_ZIP) self.assertEqual(zi.is_package(TESTPACK), True) mod = zi.load_module(TESTPACK) self.assertEqual(zi.get_filename(TESTPACK), mod.__file__) self.assertEqual(zi.is_package(packdir + '__init__'), False) self.assertEqual(zi.is_package(packdir + TESTPACK2), True) self.assertEqual(zi.is_package(packdir2 + TESTMOD), False) mod_path = packdir2 + TESTMOD mod_name = module_path_to_dotted_name(mod_path) __import__(mod_name) mod = sys.modules[mod_name] self.assertEqual(zi.get_source(TESTPACK), None) self.assertEqual(zi.get_source(mod_path), None) self.assertEqual(zi.get_filename(mod_path), mod.__file__) # To pass in the module name instead of the path, we must use the # right importer loader = mod.__loader__ self.assertEqual(loader.get_source(mod_name), None) 
self.assertEqual(loader.get_filename(mod_name), mod.__file__) # test prefix and archivepath members zi2 = zipimport.zipimporter(TEMP_ZIP + os.sep + TESTPACK) self.assertEqual(zi2.archive, TEMP_ZIP) self.assertEqual(zi2.prefix, TESTPACK + os.sep) finally: z.close() os.remove(TEMP_ZIP) def testZipImporterMethodsInSubDirectory(self): packdir = TESTPACK + os.sep packdir2 = packdir + TESTPACK2 + os.sep files = {packdir2 + "__init__" + pyc_ext: (NOW, test_pyc), packdir2 + TESTMOD + pyc_ext: (NOW, test_pyc)} z = ZipFile(TEMP_ZIP, "w") try: for name, (mtime, data) in files.items(): zinfo = ZipInfo(name, time.localtime(mtime)) zinfo.compress_type = self.compression z.writestr(zinfo, data) z.close() zi = zipimport.zipimporter(TEMP_ZIP + os.sep + packdir) self.assertEqual(zi.archive, TEMP_ZIP) self.assertEqual(zi.prefix, packdir) self.assertEqual(zi.is_package(TESTPACK2), True) mod = zi.load_module(TESTPACK2) self.assertEqual(zi.get_filename(TESTPACK2), mod.__file__) self.assertEqual( zi.is_package(TESTPACK2 + os.sep + '__init__'), False) self.assertEqual( zi.is_package(TESTPACK2 + os.sep + TESTMOD), False) mod_path = TESTPACK2 + os.sep + TESTMOD mod_name = module_path_to_dotted_name(mod_path) __import__(mod_name) mod = sys.modules[mod_name] self.assertEqual(zi.get_source(TESTPACK2), None) self.assertEqual(zi.get_source(mod_path), None) self.assertEqual(zi.get_filename(mod_path), mod.__file__) # To pass in the module name instead of the path, we must use the # right importer loader = mod.__loader__ self.assertEqual(loader.get_source(mod_name), None) self.assertEqual(loader.get_filename(mod_name), mod.__file__) finally: z.close() os.remove(TEMP_ZIP) def testGetData(self): z = ZipFile(TEMP_ZIP, "w") z.compression = self.compression try: name = "testdata.dat" data = bytes(x for x in range(256)) z.writestr(name, data) z.close() zi = zipimport.zipimporter(TEMP_ZIP) self.assertEqual(data, zi.get_data(name)) self.assertIn('zipimporter object', repr(zi)) finally: z.close() os.remove(TEMP_ZIP) def testImporterAttr(self): src = """if 1: # indent hack def get_file(): return __file__ if __loader__.get_data("some.data") != b"some data": raise AssertionError("bad data")\n""" pyc = make_pyc(compile(src, "<???>", "exec"), NOW) files = {TESTMOD + pyc_ext: (NOW, pyc), "some.data": (NOW, "some data")} self.doTest(pyc_ext, files, TESTMOD) def testImport_WithStuff(self): # try importing from a zipfile which contains additional # stuff at the beginning of the file files = {TESTMOD + ".py": (NOW, test_src)} self.doTest(".py", files, TESTMOD, stuff=b"Some Stuff"*31) def assertModuleSource(self, module): self.assertEqual(inspect.getsource(module), test_src) def testGetSource(self): files = {TESTMOD + ".py": (NOW, test_src)} self.doTest(".py", files, TESTMOD, call=self.assertModuleSource) def testGetCompiledSource(self): pyc = make_pyc(compile(test_src, "<???>", "exec"), NOW) files = {TESTMOD + ".py": (NOW, test_src), TESTMOD + pyc_ext: (NOW, pyc)} self.doTest(pyc_ext, files, TESTMOD, call=self.assertModuleSource) def runDoctest(self, callback): files = {TESTMOD + ".py": (NOW, test_src), "xyz.txt": (NOW, ">>> log.append(True)\n")} self.doTest(".py", files, TESTMOD, call=callback) def doDoctestFile(self, module): log = [] old_master, doctest.master = doctest.master, None try: doctest.testfile( 'xyz.txt', package=module, module_relative=True, globs=locals() ) finally: doctest.master = old_master self.assertEqual(log,[True]) def testDoctestFile(self): self.runDoctest(self.doDoctestFile) def doDoctestSuite(self, module): log = [] 
doctest.DocFileTest( 'xyz.txt', package=module, module_relative=True, globs=locals() ).run() self.assertEqual(log,[True]) def testDoctestSuite(self): self.runDoctest(self.doDoctestSuite) def doTraceback(self, module): try: module.do_raise() except: tb = sys.exc_info()[2].tb_next f,lno,n,line = extract_tb(tb, 1)[0] self.assertEqual(line, raise_src.strip()) f,lno,n,line = extract_stack(tb.tb_frame, 1)[0] self.assertEqual(line, raise_src.strip()) s = io.StringIO() print_tb(tb, 1, s) self.assertTrue(s.getvalue().endswith(raise_src)) else: raise AssertionError("This ought to be impossible") def testTraceback(self): files = {TESTMOD + ".py": (NOW, raise_src)} self.doTest(None, files, TESTMOD, call=self.doTraceback) @unittest.skipIf(support.TESTFN_UNENCODABLE is None, "need an unencodable filename") def testUnencodable(self): filename = support.TESTFN_UNENCODABLE + ".zip" z = ZipFile(filename, "w") zinfo = ZipInfo(TESTMOD + ".py", time.localtime(NOW)) zinfo.compress_type = self.compression z.writestr(zinfo, test_src) z.close() try: zipimport.zipimporter(filename) finally: os.remove(filename) @unittest.skipUnless(zlib, "requires zlib") class CompressedZipImportTestCase(UncompressedZipImportTestCase): compression = ZIP_DEFLATED class BadFileZipImportTestCase(unittest.TestCase): def assertZipFailure(self, filename): self.assertRaises(zipimport.ZipImportError, zipimport.zipimporter, filename) def testNoFile(self): self.assertZipFailure('AdfjdkFJKDFJjdklfjs') def testEmptyFilename(self): self.assertZipFailure('') def testBadArgs(self): self.assertRaises(TypeError, zipimport.zipimporter, None) self.assertRaises(TypeError, zipimport.zipimporter, TESTMOD, kwd=None) def testFilenameTooLong(self): self.assertZipFailure('A' * 33000) def testEmptyFile(self): support.unlink(TESTMOD) open(TESTMOD, 'w+').close() self.assertZipFailure(TESTMOD) def testFileUnreadable(self): support.unlink(TESTMOD) fd = os.open(TESTMOD, os.O_CREAT, 000) try: os.close(fd) self.assertZipFailure(TESTMOD) finally: # If we leave "the read-only bit" set on Windows, nothing can # delete TESTMOD, and later tests suffer bogus failures. os.chmod(TESTMOD, 0o666) support.unlink(TESTMOD) def testNotZipFile(self): support.unlink(TESTMOD) fp = open(TESTMOD, 'w+') fp.write('a' * 22) fp.close() self.assertZipFailure(TESTMOD) # XXX: disabled until this works on Big-endian machines def _testBogusZipFile(self): support.unlink(TESTMOD) fp = open(TESTMOD, 'w+') fp.write(struct.pack('=I', 0x06054B50)) fp.write('a' * 18) fp.close() z = zipimport.zipimporter(TESTMOD) try: self.assertRaises(TypeError, z.find_module, None) self.assertRaises(TypeError, z.load_module, None) self.assertRaises(TypeError, z.is_package, None) self.assertRaises(TypeError, z.get_code, None) self.assertRaises(TypeError, z.get_data, None) self.assertRaises(TypeError, z.get_source, None) error = zipimport.ZipImportError self.assertEqual(z.find_module('abc'), None) self.assertRaises(error, z.load_module, 'abc') self.assertRaises(error, z.get_code, 'abc') self.assertRaises(IOError, z.get_data, 'abc') self.assertRaises(error, z.get_source, 'abc') self.assertRaises(error, z.is_package, 'abc') finally: zipimport._zip_directory_cache.clear() def test_main(): try: support.run_unittest( UncompressedZipImportTestCase, CompressedZipImportTestCase, BadFileZipImportTestCase, ) finally: support.unlink(TESTMOD) if __name__ == "__main__": test_main()
apache-2.0
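The test file above drives zipimport against full archives; the short sketch below (not part of the original test suite) shows the same importer API on a throwaway archive. The archive and module names are arbitrary examples, and load_module is used only because it is the call the tests themselves exercise.

import os
import zipimport
from zipfile import ZipFile

ARCHIVE = "zipimport_demo.zip"           # throwaway example archive
with ZipFile(ARCHIVE, "w") as zf:
    zf.writestr("greet.py", "def hello():\n    return 'hello from a zip'\n")

importer = zipimport.zipimporter(ARCHIVE)
mod = importer.load_module("greet")      # deprecated in newer Pythons, but it is the API the tests use
assert not importer.is_package("greet")  # plain module, not a package
print(mod.hello(), mod.__file__)         # __file__ points inside the archive

os.remove(ARCHIVE)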
mmpagani/oq-hazardlib
openquake/hazardlib/gsim/geomatrix_1993.py
4
4554
# The Hazard Library
# Copyright (C) 2014, GEM Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
"""
Module exports :class:`Geomatrix1993SSlabNSHMP2008`.
"""
from __future__ import division

import numpy as np

from openquake.hazardlib.gsim.base import CoeffsTable, GMPE
from openquake.hazardlib import const
from openquake.hazardlib.imt import PGA, SA


class Geomatrix1993SSlabNSHMP2008(GMPE):
    """
    Implements GMPE for subduction intraslab events developed by Geomatrix
    Consultants, Inc., 1993, "Seismic margin earthquake for the Trojan site:
    Final unpublished report prepared for Portland General Electric Trojan
    Nuclear Plant", Ranier, Oregon.

    This class implements the equation as coded in the subroutine ``getGeom``
    in the ``hazgridXnga2.f`` Fortran code available at:
    http://earthquake.usgs.gov/hazards/products/conterminous/2008/software/

    Coefficients are given for the B/C site conditions.
    """
    #: Supported tectonic region type is subduction intraslab
    DEFINED_FOR_TECTONIC_REGION_TYPE = const.TRT.SUBDUCTION_INTRASLAB

    #: Supported intensity measure types are spectral acceleration,
    #: and peak ground acceleration
    DEFINED_FOR_INTENSITY_MEASURE_TYPES = set([
        PGA,
        SA
    ])

    #: Supported intensity measure component is the geometric mean of
    #: two horizontal components
    DEFINED_FOR_INTENSITY_MEASURE_COMPONENT = const.IMC.AVERAGE_HORIZONTAL

    #: Supported standard deviation type is only total.
    DEFINED_FOR_STANDARD_DEVIATION_TYPES = set([
        const.StdDev.TOTAL
    ])

    #: No site parameters required
    REQUIRES_SITES_PARAMETERS = set()

    #: Required rupture parameters are magnitude and top of rupture depth
    REQUIRES_RUPTURE_PARAMETERS = set(('mag', 'ztor'))

    #: Required distance measure is rrup (closest distance to rupture)
    REQUIRES_DISTANCES = set(('rrup', ))

    def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types):
        """
        See :meth:`superclass method
        <.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
        for spec of input and result values.
        """
        assert all(stddev_type in self.DEFINED_FOR_STANDARD_DEVIATION_TYPES
                   for stddev_type in stddev_types)

        C = self.COEFFS[imt]

        mean = self._compute_mean(C, rup.mag, rup.ztor, dists.rrup)
        stddevs = self._compute_stddevs(
            C, rup.mag, dists.rrup.shape, stddev_types
        )

        return mean, stddevs

    def _compute_mean(self, C, mag, ztor, rrup):
        """
        Compute mean value as in ``subroutine getGeom`` in ``hazgridXnga2.f``
        """
        gc0 = 0.2418
        ci = 0.3846
        gch = 0.00607
        g4 = 1.7818
        ge = 0.554
        gm = 1.414

        mean = (
            gc0 + ci + ztor * gch + C['gc1'] + gm * mag +
            C['gc2'] * (10 - mag) ** 3 +
            C['gc3'] * np.log(rrup + g4 * np.exp(ge * mag))
        )

        return mean

    def _compute_stddevs(self, C, mag, num_sites, stddev_types):
        """
        Return total standard deviation.
        """
        std_total = C['gc4'] + C['gc5'] * np.minimum(8., mag)

        stddevs = []
        for _ in stddev_types:
            stddevs.append(np.zeros(num_sites) + std_total)

        return stddevs

    #: Coefficient table obtained from coefficient arrays and variables
    #: defined in subroutine getGeom in hazgridXnga2.f
    COEFFS = CoeffsTable(sa_damping=5, table="""\
    IMT    gc1      gc2      gc3      gc4   gc5
    pga    0.0      0.0     -2.556    1.45  -0.1
    0.1    1.1880  -0.0011  -2.6550   1.45  -0.1
    0.2    0.722   -0.0027  -2.528    1.45  -0.1
    0.3    0.246   -0.0036  -2.454    1.45  -0.1
    0.5   -0.4     -0.0048  -2.36     1.45  -0.1
    1.0   -1.736   -0.0064  -2.234    1.45  -0.1
    2.0   -3.3280  -0.0080  -2.107    1.55  -0.1
    3.0   -4.511   -0.0089  -2.033    1.65  -0.1
    """)
agpl-3.0
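As a rough standalone illustration (separate from the OpenQuake framework), the ln-mean equation coded in _compute_mean above can be evaluated directly with NumPy using the PGA row of the COEFFS table; the magnitude, depth and distance values below are made-up examples, not values from the source.

import numpy as np

# constants hard-coded in _compute_mean, plus the PGA row of COEFFS
gc0, ci, gch, g4, ge, gm = 0.2418, 0.3846, 0.00607, 1.7818, 0.554, 1.414
gc1, gc2, gc3 = 0.0, 0.0, -2.556

mag, ztor = 6.5, 50.0                    # example magnitude and top-of-rupture depth (km)
rrup = np.array([80.0, 120.0])           # example rupture distances (km)

ln_mean = (gc0 + ci + ztor * gch + gc1 + gm * mag
           + gc2 * (10 - mag) ** 3
           + gc3 * np.log(rrup + g4 * np.exp(ge * mag)))
print(np.exp(ln_mean))                   # exponentiate the natural-log mean, one value per distance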
foresterh/slackbot
slackbot/utils.py
1
2511
#coding: UTF-8
import os
import json
import logging
import random   # used by get_text() below; missing from the original imports
import tempfile
import requests
from contextlib import contextmanager

from six.moves import _thread, range, queue
import six

logger = logging.getLogger(__name__)


def download_file(url, fpath):
    logger.debug('starting to fetch %s', url)
    r = requests.get(url, stream=True)
    with open(fpath, 'wb') as f:
        for chunk in r.iter_content(chunk_size=1024*64):
            if chunk:  # filter out keep-alive new chunks
                f.write(chunk)
                f.flush()
    logger.debug('fetch %s', fpath)
    return fpath


def to_utf8(s):
    """Convert a string to utf8. If the argument is an iterable
    (list/tuple/set), then each element of it would be converted instead.

    >>> to_utf8('a')
    'a'
    >>> to_utf8(u'a')
    'a'
    >>> to_utf8([u'a', u'b', u'\u4f60'])
    ['a', 'b', '\\xe4\\xbd\\xa0']
    """
    if six.PY2:
        if isinstance(s, str):
            return s
        elif isinstance(s, unicode):
            return s.encode('utf-8')
        elif isinstance(s, (list, tuple, set)):
            return [to_utf8(v) for v in s]
        else:
            return s
    else:
        return s


def get_text(url):
    try:
        response = requests.get(url)
        return response.text
    except:
        error_messages = ("No way can I do that",
                          "Why would I want to do that?",
                          "Bank error in your favor",
                          "You trying to kill me with that request??")
        return random.choice(error_messages)


def get_json(url, username=None, password=None):
    data = requests.get(url, auth=(username, password))
    if data:
        return data.json()
    return None


def get_json_with_headers(url, headers):
    data = requests.get(url, headers=headers)
    if data:
        return json.loads(data.text)
    return None


@contextmanager
def create_tmp_file(content=''):
    fd, name = tempfile.mkstemp()
    try:
        if content:
            os.write(fd, content)
        yield name
    finally:
        os.close(fd)
        os.remove(name)


class WorkerPool(object):
    def __init__(self, func, nworker=10):
        self.nworker = nworker
        self.func = func
        self.queue = queue.Queue()

    def start(self):
        for __ in range(self.nworker):
            _thread.start_new_thread(self.do_work, tuple())

    def add_task(self, msg):
        self.queue.put(msg)

    def do_work(self):
        while True:
            msg = self.queue.get()
            self.func(msg)
mit
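A small usage sketch for two of the helpers defined in slackbot/utils.py above, assuming the package is importable; the handler, payloads and worker count are arbitrary examples, not part of the original file.

import time
from slackbot.utils import WorkerPool, create_tmp_file

def handle(msg):
    print('processing %r' % (msg,))

pool = WorkerPool(handle, nworker=4)     # workers are raw _thread threads started by start()
pool.start()
for i in range(10):
    pool.add_task({'id': i})
time.sleep(1)                            # crude wait; do_work() loops on the queue forever

with create_tmp_file(b'hello') as path:  # bytes, since os.write() is used internally
    with open(path, 'rb') as fh:
        print(fh.read())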
abo-abo/edx-platform
common/lib/capa/capa/checker.py
123
5899
#!/usr/bin/env python """ Commandline tool for doing operations on Problems """ from __future__ import unicode_literals import argparse import logging import sys from path import path from cStringIO import StringIO from calc import UndefinedVariable from capa.capa_problem import LoncapaProblem from mako.lookup import TemplateLookup logging.basicConfig(format="%(levelname)s %(message)s") log = logging.getLogger('capa.checker') class DemoSystem(object): def __init__(self): self.lookup = TemplateLookup(directories=[path(__file__).dirname() / 'templates']) self.DEBUG = True def render_template(self, template_filename, dictionary, context=None): if context is None: context = {} context_dict = {} context_dict.update(dictionary) context_dict.update(context) return self.lookup.get_template(template_filename).render(**context_dict) def main(): parser = argparse.ArgumentParser(description='Check Problem Files') parser.add_argument("command", choices=['test', 'show']) # Watch? Render? Open? parser.add_argument("files", nargs="+", type=argparse.FileType('r')) parser.add_argument("--seed", required=False, type=int) parser.add_argument("--log-level", required=False, default="INFO", choices=['info', 'debug', 'warn', 'error', 'INFO', 'DEBUG', 'WARN', 'ERROR']) args = parser.parse_args() log.setLevel(args.log_level.upper()) system = DemoSystem() for problem_file in args.files: log.info("Opening {0}".format(problem_file.name)) try: problem = LoncapaProblem(problem_file, "fakeid", seed=args.seed, system=system) except Exception as ex: log.error("Could not parse file {0}".format(problem_file.name)) log.exception(ex) continue if args.command == 'test': command_test(problem) elif args.command == 'show': command_show(problem) problem_file.close() # In case we want to do anything else here. def command_show(problem): """Display the text for this problem""" print problem.get_html() def command_test(problem): # We're going to trap stdout/stderr from the problems (yes, some print) old_stdout, old_stderr = sys.stdout, sys.stderr try: sys.stdout = StringIO() sys.stderr = StringIO() check_that_suggested_answers_work(problem) check_that_blanks_fail(problem) log_captured_output(sys.stdout, "captured stdout from {0}".format(problem)) log_captured_output(sys.stderr, "captured stderr from {0}".format(problem)) except Exception as e: log.exception(e) finally: sys.stdout, sys.stderr = old_stdout, old_stderr def check_that_blanks_fail(problem): """Leaving it blank should never work. Neither should a space.""" blank_answers = dict((answer_id, u"") for answer_id in problem.get_question_answers()) grading_results = problem.grade_answers(blank_answers) try: assert(all(result == 'incorrect' for result in grading_results.values())) except AssertionError: log.error("Blank accepted as correct answer in {0} for {1}" .format(problem, [answer_id for answer_id, result in sorted(grading_results.items()) if result != 'incorrect'])) def check_that_suggested_answers_work(problem): """Split this up so that we're only used for formula/numeric answers. Examples of where this fails: * Displayed answers use units but acceptable ones do not. - L1e0.xml - Presents itself as UndefinedVariable (when it tries to pass to calc) * "a or d" is what's displayed, but only "a" or "d" is accepted, not the string "a or d". 
- L1-e00.xml """ # These are actual answers we get from the responsetypes real_answers = problem.get_question_answers() # all_answers is real_answers + blanks for other answer_ids for which the # responsetypes can't provide us pre-canned answers (customresponse) all_answer_ids = problem.get_answer_ids() all_answers = dict((answer_id, real_answers.get(answer_id, "")) for answer_id in all_answer_ids) log.debug("Real answers: {0}".format(real_answers)) if real_answers: try: real_results = dict((answer_id, result) for answer_id, result in problem.grade_answers(all_answers).items() if answer_id in real_answers) log.debug(real_results) assert(all(result == 'correct' for answer_id, result in real_results.items())) except UndefinedVariable as uv_exc: log.error("The variable \"{0}\" specified in the ".format(uv_exc) + "solution isn't recognized (is it a units measure?).") except AssertionError: log.error("The following generated answers were not accepted for {0}:" .format(problem)) for question_id, result in sorted(real_results.items()): if result != 'correct': log.error(" {0} = {1}".format(question_id, real_answers[question_id])) except Exception as ex: log.error("Uncaught error in {0}".format(problem)) log.exception(ex) def log_captured_output(output_stream, stream_name): output_stream.seek(0) output_text = output_stream.read() if output_text: log.info("##### Begin {0} #####\n".format(stream_name) + output_text) log.info("##### End {0} #####".format(stream_name)) if __name__ == '__main__': sys.exit(main())
agpl-3.0
jblackburne/scikit-learn
benchmarks/bench_random_projections.py
397
8900
""" =========================== Random projection benchmark =========================== Benchmarks for random projections. """ from __future__ import division from __future__ import print_function import gc import sys import optparse from datetime import datetime import collections import numpy as np import scipy.sparse as sp from sklearn import clone from sklearn.externals.six.moves import xrange from sklearn.random_projection import (SparseRandomProjection, GaussianRandomProjection, johnson_lindenstrauss_min_dim) def type_auto_or_float(val): if val == "auto": return "auto" else: return float(val) def type_auto_or_int(val): if val == "auto": return "auto" else: return int(val) def compute_time(t_start, delta): mu_second = 0.0 + 10 ** 6 # number of microseconds in a second return delta.seconds + delta.microseconds / mu_second def bench_scikit_transformer(X, transfomer): gc.collect() clf = clone(transfomer) # start time t_start = datetime.now() clf.fit(X) delta = (datetime.now() - t_start) # stop time time_to_fit = compute_time(t_start, delta) # start time t_start = datetime.now() clf.transform(X) delta = (datetime.now() - t_start) # stop time time_to_transform = compute_time(t_start, delta) return time_to_fit, time_to_transform # Make some random data with uniformly located non zero entries with # Gaussian distributed values def make_sparse_random_data(n_samples, n_features, n_nonzeros, random_state=None): rng = np.random.RandomState(random_state) data_coo = sp.coo_matrix( (rng.randn(n_nonzeros), (rng.randint(n_samples, size=n_nonzeros), rng.randint(n_features, size=n_nonzeros))), shape=(n_samples, n_features)) return data_coo.toarray(), data_coo.tocsr() def print_row(clf_type, time_fit, time_transform): print("%s | %s | %s" % (clf_type.ljust(30), ("%.4fs" % time_fit).center(12), ("%.4fs" % time_transform).center(12))) if __name__ == "__main__": ########################################################################### # Option parser ########################################################################### op = optparse.OptionParser() op.add_option("--n-times", dest="n_times", default=5, type=int, help="Benchmark results are average over n_times experiments") op.add_option("--n-features", dest="n_features", default=10 ** 4, type=int, help="Number of features in the benchmarks") op.add_option("--n-components", dest="n_components", default="auto", help="Size of the random subspace." " ('auto' or int > 0)") op.add_option("--ratio-nonzeros", dest="ratio_nonzeros", default=10 ** -3, type=float, help="Number of features in the benchmarks") op.add_option("--n-samples", dest="n_samples", default=500, type=int, help="Number of samples in the benchmarks") op.add_option("--random-seed", dest="random_seed", default=13, type=int, help="Seed used by the random number generators.") op.add_option("--density", dest="density", default=1 / 3, help="Density used by the sparse random projection." " ('auto' or float (0.0, 1.0]") op.add_option("--eps", dest="eps", default=0.5, type=float, help="See the documentation of the underlying transformers.") op.add_option("--transformers", dest="selected_transformers", default='GaussianRandomProjection,SparseRandomProjection', type=str, help="Comma-separated list of transformer to benchmark. " "Default: %default. 
Available: " "GaussianRandomProjection,SparseRandomProjection") op.add_option("--dense", dest="dense", default=False, action="store_true", help="Set input space as a dense matrix.") (opts, args) = op.parse_args() if len(args) > 0: op.error("this script takes no arguments.") sys.exit(1) opts.n_components = type_auto_or_int(opts.n_components) opts.density = type_auto_or_float(opts.density) selected_transformers = opts.selected_transformers.split(',') ########################################################################### # Generate dataset ########################################################################### n_nonzeros = int(opts.ratio_nonzeros * opts.n_features) print('Dataset statics') print("===========================") print('n_samples \t= %s' % opts.n_samples) print('n_features \t= %s' % opts.n_features) if opts.n_components == "auto": print('n_components \t= %s (auto)' % johnson_lindenstrauss_min_dim(n_samples=opts.n_samples, eps=opts.eps)) else: print('n_components \t= %s' % opts.n_components) print('n_elements \t= %s' % (opts.n_features * opts.n_samples)) print('n_nonzeros \t= %s per feature' % n_nonzeros) print('ratio_nonzeros \t= %s' % opts.ratio_nonzeros) print('') ########################################################################### # Set transformer input ########################################################################### transformers = {} ########################################################################### # Set GaussianRandomProjection input gaussian_matrix_params = { "n_components": opts.n_components, "random_state": opts.random_seed } transformers["GaussianRandomProjection"] = \ GaussianRandomProjection(**gaussian_matrix_params) ########################################################################### # Set SparseRandomProjection input sparse_matrix_params = { "n_components": opts.n_components, "random_state": opts.random_seed, "density": opts.density, "eps": opts.eps, } transformers["SparseRandomProjection"] = \ SparseRandomProjection(**sparse_matrix_params) ########################################################################### # Perform benchmark ########################################################################### time_fit = collections.defaultdict(list) time_transform = collections.defaultdict(list) print('Benchmarks') print("===========================") print("Generate dataset benchmarks... ", end="") X_dense, X_sparse = make_sparse_random_data(opts.n_samples, opts.n_features, n_nonzeros, random_state=opts.random_seed) X = X_dense if opts.dense else X_sparse print("done") for name in selected_transformers: print("Perform benchmarks for %s..." % name) for iteration in xrange(opts.n_times): print("\titer %s..." % iteration, end="") time_to_fit, time_to_transform = bench_scikit_transformer(X_dense, transformers[name]) time_fit[name].append(time_to_fit) time_transform[name].append(time_to_transform) print("done") print("") ########################################################################### # Print results ########################################################################### print("Script arguments") print("===========================") arguments = vars(opts) print("%s \t | %s " % ("Arguments".ljust(16), "Value".center(12),)) print(25 * "-" + ("|" + "-" * 14) * 1) for key, value in arguments.items(): print("%s \t | %s " % (str(key).ljust(16), str(value).strip().center(12))) print("") print("Transformer performance:") print("===========================") print("Results are averaged over %s repetition(s)." 
% opts.n_times) print("") print("%s | %s | %s" % ("Transformer".ljust(30), "fit".center(12), "transform".center(12))) print(31 * "-" + ("|" + "-" * 14) * 2) for name in sorted(selected_transformers): print_row(name, np.mean(time_fit[name]), np.mean(time_transform[name])) print("") print("")
bsd-3-clause
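Outside the benchmark harness, the two estimators it times can be exercised directly; the array sizes and eps below are small made-up values, not the benchmark defaults.

import numpy as np
from sklearn.random_projection import (SparseRandomProjection,
                                       johnson_lindenstrauss_min_dim)

rng = np.random.RandomState(13)
X = rng.randn(500, 10000)

# Johnson-Lindenstrauss lower bound on n_components for this n_samples / eps
print(johnson_lindenstrauss_min_dim(n_samples=500, eps=0.5))

rp = SparseRandomProjection(n_components='auto', eps=0.5, density='auto',
                            random_state=13)
X_new = rp.fit_transform(X)
print(X_new.shape)                       # (500, n_components chosen from the JL bound)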
alanjw/GreenOpenERP-Win-X86
python/Lib/site-packages/pythonwin/pywin/idle/AutoIndent.py
4
21162
import sys import string, tokenize import PyParse from pywin import default_scintilla_encoding if sys.version_info < (3,): # in py2k, tokenize() takes a 'token eater' callback, while # generate_tokens is a generator that works with str objects. token_generator = tokenize.generate_tokens else: # in py3k tokenize() is the generator working with 'byte' objects, and # token_generator is the 'undocumented b/w compat' function that # theoretically works with str objects - but actually seems to fail) token_generator = tokenize.tokenize class AutoIndent: menudefs = [ ('edit', [ None, ('_Indent region', '<<indent-region>>'), ('_Dedent region', '<<dedent-region>>'), ('Comment _out region', '<<comment-region>>'), ('U_ncomment region', '<<uncomment-region>>'), ('Tabify region', '<<tabify-region>>'), ('Untabify region', '<<untabify-region>>'), ('Toggle tabs', '<<toggle-tabs>>'), ('New indent width', '<<change-indentwidth>>'), ]), ] keydefs = { '<<smart-backspace>>': ['<Key-BackSpace>'], '<<newline-and-indent>>': ['<Key-Return>', '<KP_Enter>'], '<<smart-indent>>': ['<Key-Tab>'] } windows_keydefs = { '<<indent-region>>': ['<Control-bracketright>'], '<<dedent-region>>': ['<Control-bracketleft>'], '<<comment-region>>': ['<Alt-Key-3>'], '<<uncomment-region>>': ['<Alt-Key-4>'], '<<tabify-region>>': ['<Alt-Key-5>'], '<<untabify-region>>': ['<Alt-Key-6>'], '<<toggle-tabs>>': ['<Alt-Key-t>'], '<<change-indentwidth>>': ['<Alt-Key-u>'], } unix_keydefs = { '<<indent-region>>': ['<Alt-bracketright>', '<Meta-bracketright>', '<Control-bracketright>'], '<<dedent-region>>': ['<Alt-bracketleft>', '<Meta-bracketleft>', '<Control-bracketleft>'], '<<comment-region>>': ['<Alt-Key-3>', '<Meta-Key-3>'], '<<uncomment-region>>': ['<Alt-Key-4>', '<Meta-Key-4>'], '<<tabify-region>>': ['<Alt-Key-5>', '<Meta-Key-5>'], '<<untabify-region>>': ['<Alt-Key-6>', '<Meta-Key-6>'], '<<toggle-tabs>>': ['<Alt-Key-t>'], '<<change-indentwidth>>': ['<Alt-Key-u>'], } # usetabs true -> literal tab characters are used by indent and # dedent cmds, possibly mixed with spaces if # indentwidth is not a multiple of tabwidth # false -> tab characters are converted to spaces by indent # and dedent cmds, and ditto TAB keystrokes # indentwidth is the number of characters per logical indent level. # tabwidth is the display width of a literal tab character. # CAUTION: telling Tk to use anything other than its default # tab setting causes it to use an entirely different tabbing algorithm, # treating tab stops as fixed distances from the left margin. # Nobody expects this, so for now tabwidth should never be changed. usetabs = 1 indentwidth = 4 tabwidth = 8 # for IDLE use, must remain 8 until Tk is fixed # If context_use_ps1 is true, parsing searches back for a ps1 line; # else searches for a popular (if, def, ...) Python stmt. context_use_ps1 = 0 # When searching backwards for a reliable place to begin parsing, # first start num_context_lines[0] lines back, then # num_context_lines[1] lines back if that didn't work, and so on. # The last value should be huge (larger than the # of lines in a # conceivable file). # Making the initial values larger slows things down more often. 
num_context_lines = 50, 500, 5000000 def __init__(self, editwin): self.editwin = editwin self.text = editwin.text def config(self, **options): for key, value in options.iteritems(): if key == 'usetabs': self.usetabs = value elif key == 'indentwidth': self.indentwidth = value elif key == 'tabwidth': self.tabwidth = value elif key == 'context_use_ps1': self.context_use_ps1 = value else: raise KeyError("bad option name: %s" % repr(key)) # If ispythonsource and guess are true, guess a good value for # indentwidth based on file content (if possible), and if # indentwidth != tabwidth set usetabs false. # In any case, adjust the Text widget's view of what a tab # character means. def set_indentation_params(self, ispythonsource, guess=1): if guess and ispythonsource: i = self.guess_indent() if 2 <= i <= 8: self.indentwidth = i if self.indentwidth != self.tabwidth: self.usetabs = 0 self.editwin.set_tabwidth(self.tabwidth) def smart_backspace_event(self, event): text = self.text first, last = self.editwin.get_selection_indices() if first and last: text.delete(first, last) text.mark_set("insert", first) return "break" # Delete whitespace left, until hitting a real char or closest # preceding virtual tab stop. chars = text.get("insert linestart", "insert") if chars == '': if text.compare("insert", ">", "1.0"): # easy: delete preceding newline text.delete("insert-1c") else: text.bell() # at start of buffer return "break" if chars[-1] not in " \t": # easy: delete preceding real char text.delete("insert-1c") return "break" # Ick. It may require *inserting* spaces if we back up over a # tab character! This is written to be clear, not fast. have = len(chars.expandtabs(self.tabwidth)) assert have > 0 want = int((have - 1) / self.indentwidth) * self.indentwidth ncharsdeleted = 0 while 1: chars = chars[:-1] ncharsdeleted = ncharsdeleted + 1 have = len(chars.expandtabs(self.tabwidth)) if have <= want or chars[-1] not in " \t": break text.undo_block_start() text.delete("insert-%dc" % ncharsdeleted, "insert") if have < want: text.insert("insert", ' ' * (want - have)) text.undo_block_stop() return "break" def smart_indent_event(self, event): # if intraline selection: # delete it # elif multiline selection: # do indent-region & return # indent one level text = self.text first, last = self.editwin.get_selection_indices() text.undo_block_start() try: if first and last: if index2line(first) != index2line(last): return self.indent_region_event(event) text.delete(first, last) text.mark_set("insert", first) prefix = text.get("insert linestart", "insert") raw, effective = classifyws(prefix, self.tabwidth) if raw == len(prefix): # only whitespace to the left self.reindent_to(effective + self.indentwidth) else: if self.usetabs: pad = '\t' else: effective = len(prefix.expandtabs(self.tabwidth)) n = self.indentwidth pad = ' ' * (n - effective % n) text.insert("insert", pad) text.see("insert") return "break" finally: text.undo_block_stop() def newline_and_indent_event(self, event): text = self.text first, last = self.editwin.get_selection_indices() text.undo_block_start() try: if first and last: text.delete(first, last) text.mark_set("insert", first) line = text.get("insert linestart", "insert") i, n = 0, len(line) while i < n and line[i] in " \t": i = i+1 if i == n: # the cursor is in or at leading indentation; just inject # an empty line at the start and strip space from current line text.delete("insert - %d chars" % i, "insert") text.insert("insert linestart", '\n') return "break" indent = line[:i] # strip whitespace 
before insert point i = 0 while line and line[-1] in " \t": line = line[:-1] i = i+1 if i: text.delete("insert - %d chars" % i, "insert") # strip whitespace after insert point while text.get("insert") in " \t": text.delete("insert") # start new line text.insert("insert", '\n') # adjust indentation for continuations and block # open/close first need to find the last stmt lno = index2line(text.index('insert')) y = PyParse.Parser(self.indentwidth, self.tabwidth) for context in self.num_context_lines: startat = max(lno - context, 1) startatindex = repr(startat) + ".0" rawtext = text.get(startatindex, "insert") y.set_str(rawtext) bod = y.find_good_parse_start( self.context_use_ps1, self._build_char_in_string_func(startatindex)) if bod is not None or startat == 1: break y.set_lo(bod or 0) c = y.get_continuation_type() if c != PyParse.C_NONE: # The current stmt hasn't ended yet. if c == PyParse.C_STRING: # inside a string; just mimic the current indent text.insert("insert", indent) elif c == PyParse.C_BRACKET: # line up with the first (if any) element of the # last open bracket structure; else indent one # level beyond the indent of the line with the # last open bracket self.reindent_to(y.compute_bracket_indent()) elif c == PyParse.C_BACKSLASH: # if more than one line in this stmt already, just # mimic the current indent; else if initial line # has a start on an assignment stmt, indent to # beyond leftmost =; else to beyond first chunk of # non-whitespace on initial line if y.get_num_lines_in_stmt() > 1: text.insert("insert", indent) else: self.reindent_to(y.compute_backslash_indent()) else: assert 0, "bogus continuation type " + repr(c) return "break" # This line starts a brand new stmt; indent relative to # indentation of initial line of closest preceding # interesting stmt. indent = y.get_base_indent_string() text.insert("insert", indent) if y.is_block_opener(): self.smart_indent_event(event) elif indent and y.is_block_closer(): self.smart_backspace_event(event) return "break" finally: text.see("insert") text.undo_block_stop() auto_indent = newline_and_indent_event # Our editwin provides a is_char_in_string function that works # with a Tk text index, but PyParse only knows about offsets into # a string. This builds a function for PyParse that accepts an # offset. 
def _build_char_in_string_func(self, startindex): def inner(offset, _startindex=startindex, _icis=self.editwin.is_char_in_string): return _icis(_startindex + "+%dc" % offset) return inner def indent_region_event(self, event): head, tail, chars, lines = self.get_region() for pos in range(len(lines)): line = lines[pos] if line: raw, effective = classifyws(line, self.tabwidth) effective = effective + self.indentwidth lines[pos] = self._make_blanks(effective) + line[raw:] self.set_region(head, tail, chars, lines) return "break" def dedent_region_event(self, event): head, tail, chars, lines = self.get_region() for pos in range(len(lines)): line = lines[pos] if line: raw, effective = classifyws(line, self.tabwidth) effective = max(effective - self.indentwidth, 0) lines[pos] = self._make_blanks(effective) + line[raw:] self.set_region(head, tail, chars, lines) return "break" def comment_region_event(self, event): head, tail, chars, lines = self.get_region() for pos in range(len(lines) - 1): line = lines[pos] lines[pos] = '##' + line self.set_region(head, tail, chars, lines) def uncomment_region_event(self, event): head, tail, chars, lines = self.get_region() for pos in range(len(lines)): line = lines[pos] if not line: continue if line[:2] == '##': line = line[2:] elif line[:1] == '#': line = line[1:] lines[pos] = line self.set_region(head, tail, chars, lines) def tabify_region_event(self, event): head, tail, chars, lines = self.get_region() tabwidth = self._asktabwidth() for pos in range(len(lines)): line = lines[pos] if line: raw, effective = classifyws(line, tabwidth) ntabs, nspaces = divmod(effective, tabwidth) lines[pos] = '\t' * ntabs + ' ' * nspaces + line[raw:] self.set_region(head, tail, chars, lines) def untabify_region_event(self, event): head, tail, chars, lines = self.get_region() tabwidth = self._asktabwidth() for pos in range(len(lines)): lines[pos] = lines[pos].expandtabs(tabwidth) self.set_region(head, tail, chars, lines) def toggle_tabs_event(self, event): if self.editwin.askyesno( "Toggle tabs", "Turn tabs " + ("on", "off")[self.usetabs] + "?", parent=self.text): self.usetabs = not self.usetabs return "break" # XXX this isn't bound to anything -- see class tabwidth comments def change_tabwidth_event(self, event): new = self._asktabwidth() if new != self.tabwidth: self.tabwidth = new self.set_indentation_params(0, guess=0) return "break" def change_indentwidth_event(self, event): new = self.editwin.askinteger( "Indent width", "New indent width (1-16)", parent=self.text, initialvalue=self.indentwidth, minvalue=1, maxvalue=16) if new and new != self.indentwidth: self.indentwidth = new return "break" def get_region(self): text = self.text first, last = self.editwin.get_selection_indices() if first and last: head = text.index(first + " linestart") tail = text.index(last + "-1c lineend +1c") else: head = text.index("insert linestart") tail = text.index("insert lineend +1c") chars = text.get(head, tail) lines = chars.split("\n") return head, tail, chars, lines def set_region(self, head, tail, chars, lines): text = self.text newchars = "\n".join(lines) if newchars == chars: text.bell() return text.tag_remove("sel", "1.0", "end") text.mark_set("insert", head) text.undo_block_start() text.delete(head, tail) text.insert(head, newchars) text.undo_block_stop() text.tag_add("sel", head, "insert") # Make string that displays as n leading blanks. 
def _make_blanks(self, n): if self.usetabs: ntabs, nspaces = divmod(n, self.tabwidth) return '\t' * ntabs + ' ' * nspaces else: return ' ' * n # Delete from beginning of line to insert point, then reinsert # column logical (meaning use tabs if appropriate) spaces. def reindent_to(self, column): text = self.text text.undo_block_start() if text.compare("insert linestart", "!=", "insert"): text.delete("insert linestart", "insert") if column: text.insert("insert", self._make_blanks(column)) text.undo_block_stop() def _asktabwidth(self): return self.editwin.askinteger( "Tab width", "Spaces per tab?", parent=self.text, initialvalue=self.tabwidth, minvalue=1, maxvalue=16) or self.tabwidth # Guess indentwidth from text content. # Return guessed indentwidth. This should not be believed unless # it's in a reasonable range (e.g., it will be 0 if no indented # blocks are found). def guess_indent(self): opener, indented = IndentSearcher(self.text, self.tabwidth).run() if opener and indented: raw, indentsmall = classifyws(opener, self.tabwidth) raw, indentlarge = classifyws(indented, self.tabwidth) else: indentsmall = indentlarge = 0 return indentlarge - indentsmall # "line.col" -> line, as an int def index2line(index): return int(float(index)) # Look at the leading whitespace in s. # Return pair (# of leading ws characters, # effective # of leading blanks after expanding # tabs to width tabwidth) def classifyws(s, tabwidth): raw = effective = 0 for ch in s: if ch == ' ': raw = raw + 1 effective = effective + 1 elif ch == '\t': raw = raw + 1 effective = (effective // tabwidth + 1) * tabwidth else: break return raw, effective class IndentSearcher: # .run() chews over the Text widget, looking for a block opener # and the stmt following it. Returns a pair, # (line containing block opener, line containing stmt) # Either or both may be None. def __init__(self, text, tabwidth): self.text = text self.tabwidth = tabwidth self.i = self.finished = 0 self.blkopenline = self.indentedline = None def readline(self): if self.finished: val = "" else: i = self.i = self.i + 1 mark = repr(i) + ".0" if self.text.compare(mark, ">=", "end"): val = "" else: val = self.text.get(mark, mark + " lineend+1c") # hrm - not sure this is correct in py3k - the source code may have # an encoding declared, but the data will *always* be in # default_scintilla_encoding - so if anyone looks at the encoding decl # in the source they will be wrong. I think. Maybe. Or something... return val.encode(default_scintilla_encoding) def run(self): OPENERS=('class', 'def', 'for', 'if', 'try', 'while') INDENT=tokenize.INDENT NAME=tokenize.NAME save_tabsize = tokenize.tabsize tokenize.tabsize = self.tabwidth try: try: for (typ, token, start, end, line) in token_generator(self.readline): if typ == NAME and token in OPENERS: self.blkopenline = line elif typ == INDENT and self.blkopenline: self.indentedline = line break except (tokenize.TokenError, IndentationError): # since we cut off the tokenizer early, we can trigger # spurious errors pass finally: tokenize.tabsize = save_tabsize return self.blkopenline, self.indentedline
agpl-3.0
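The classifyws() helper above (shared by the indent/dedent region commands) counts leading whitespace two ways: raw characters and the effective column after tab expansion. A quick equivalent check with str.expandtabs, on an arbitrary sample line:

line = '\t    x = 1'                     # arbitrary sample: one tab, then four spaces
leading = line[:len(line) - len(line.lstrip(' \t'))]
raw = len(leading)                       # 5 whitespace characters
effective = len(leading.expandtabs(8))   # 12 columns at tabwidth=8
print(raw, effective)                    # classifyws(line, 8) returns the same pair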
pgermain/pbda
pbda_learn.py
1
4558
#!/usr/bin/env python #-*- coding:utf-8 -*- ''' PAC-BAYESIAN DOMAIN ADAPTATION (aka PBDA) Executable script to launch the learning algorithm @author: Pascal Germain -- http://researchers.lille.inria.fr/pgermain/ ''' import common from pbda import * from dataset import * from kernel import * import sys import pickle import argparse common.print_header('LEARNING ALGORITHM') # Arguments parser parser = argparse.ArgumentParser(description="", formatter_class=common.custom_formatter, epilog="") parser.add_argument("-c", dest="C_value", type=float, default=1.0, help="Trade-off parameter \"C\" (source risk modifier). Default: 1.0") parser.add_argument("-a", dest="A_value", type=float, default=1.0, help="Trade-off parameter \"A\" (domain disagreement modifier). Default: 1.0") parser.add_argument("--kernel", "-k", dest="kernel", default="linear", choices=['rbf', 'linear'], help="Kernel function. Default: linear.") parser.add_argument("--gamma", "-g", dest="gamma", type=float, default=1.0, help="Gamma parameter of the RBF kernel. Only used if --kernel is set to rbf. Default: 1.0") parser.add_argument("--nb_restarts", "-n", dest="nb_restarts", type=int, default=1, help='Number of random restarts of the optimization process. Default: 1') parser.add_argument("--format", "-f", dest="format", choices=['matrix', 'svmlight'], default='matrix', help='Datasets format. Default: matrix (each line defines an example, the first column defines the label in {-1, 1}, and the next columns represent the real-valued features)') parser.add_argument("--model", "-m", dest="model_file", default='model.bin', help="Model file name. Default: model.bin") parser.add_argument("--weight", "-w", dest="weight_file", default='', help="Weight vector file name. Default: (none)") parser.add_argument("source_file", help="Defines the file containing the source dataset.") parser.add_argument("target_file", help="Defines the file containing the target dataset.") args = parser.parse_args() # Main program ############################################################################### print('... Loading dataset files ...') ############################################################################### try: if args.format == 'matrix': source_data = dataset_from_matrix_file(args.source_file) elif args.format == 'svmlight': source_data = dataset_from_svmlight_file(args.source_file) except: print('ERROR: Unable to load source file "' + args.source_file + '".') sys.exit(-1) print(str(source_data.get_nb_examples()) + ' source examples loaded.') try: if args.format == 'matrix': target_data = dataset_from_matrix_file(args.target_file) elif args.format == 'svmlight': target_data = dataset_from_svmlight_file(args.target_file, source_data.get_nb_features()) source_data.reshape_features(target_data.get_nb_features()) except: print('ERROR: Unable to load target file "' + args.target_file + '".') sys.exit(-1) print(str(target_data.get_nb_examples()) + ' target examples loaded.') ############################################################################### print('\n... Learning ...') ############################################################################### if args.kernel == 'rbf': kernel = Kernel('rbf', gamma=args.gamma) elif args.kernel == 'linear': kernel = Kernel('linear') algo = Pbda(A=args.A_value, C=args.C_value, verbose=True, nb_restarts=args.nb_restarts ) classifier = algo.learn(source_data, target_data, kernel) ############################################################################### print('\n... 
Saving model: "' + args.model_file + '" ...') ############################################################################### try: with open(args.model_file, 'wb') as model: pickle.dump(classifier, model, pickle.HIGHEST_PROTOCOL) print('File "' + args.model_file + '" created.') except: print('ERROR: Unable to write model file "' + args.model_file + '".') if len(args.weight_file) > 0: try: classifier.write_to_file(args.weight_file) print('File "' + args.weight_file + '" created.') except: print('ERROR: Unable to write weight file "' + args.weight_file + '".') ############################################################################### print('\n... Computing statistics ...') ############################################################################### stats_dict = algo.get_stats() for key,val in stats_dict.items(): print( str(key) + ' = ' + str(val) )
bsd-2-clause
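The script above is meant to be run from the command line; an example invocation (file names and hyper-parameter values are placeholders) and the matching way to reload the model it pickles:

# python pbda_learn.py -c 1.0 -a 0.5 --kernel rbf --gamma 0.1 \
#     --format svmlight --model model.bin source.svmlight target.svmlight

import pickle

# Reloading assumes the pbda/dataset/kernel modules are on the path, since the
# pickle stores an instance of the learned classifier class.
with open('model.bin', 'rb') as fh:
    classifier = pickle.load(fh)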
patrickwind/My_Blog
venv/lib/python2.7/site-packages/werkzeug/formparser.py
295
21205
# -*- coding: utf-8 -*- """ werkzeug.formparser ~~~~~~~~~~~~~~~~~~~ This module implements the form parsing. It supports url-encoded forms as well as non-nested multipart uploads. :copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details. :license: BSD, see LICENSE for more details. """ import re import codecs from io import BytesIO from tempfile import TemporaryFile from itertools import chain, repeat, tee from functools import update_wrapper from werkzeug._compat import to_native, text_type from werkzeug.urls import url_decode_stream from werkzeug.wsgi import make_line_iter, \ get_input_stream, get_content_length from werkzeug.datastructures import Headers, FileStorage, MultiDict from werkzeug.http import parse_options_header #: an iterator that yields empty strings _empty_string_iter = repeat('') #: a regular expression for multipart boundaries _multipart_boundary_re = re.compile('^[ -~]{0,200}[!-~]$') #: supported http encodings that are also available in python we support #: for multipart messages. _supported_multipart_encodings = frozenset(['base64', 'quoted-printable']) def default_stream_factory(total_content_length, filename, content_type, content_length=None): """The stream factory that is used per default.""" if total_content_length > 1024 * 500: return TemporaryFile('wb+') return BytesIO() def parse_form_data(environ, stream_factory=None, charset='utf-8', errors='replace', max_form_memory_size=None, max_content_length=None, cls=None, silent=True): """Parse the form data in the environ and return it as tuple in the form ``(stream, form, files)``. You should only call this method if the transport method is `POST`, `PUT`, or `PATCH`. If the mimetype of the data transmitted is `multipart/form-data` the files multidict will be filled with `FileStorage` objects. If the mimetype is unknown the input stream is wrapped and returned as first argument, else the stream is empty. This is a shortcut for the common usage of :class:`FormDataParser`. Have a look at :ref:`dealing-with-request-data` for more details. .. versionadded:: 0.5 The `max_form_memory_size`, `max_content_length` and `cls` parameters were added. .. versionadded:: 0.5.1 The optional `silent` flag was added. :param environ: the WSGI environment to be used for parsing. :param stream_factory: An optional callable that returns a new read and writeable file descriptor. This callable works the same as :meth:`~BaseResponse._get_file_stream`. :param charset: The character set for URL and url encoded form data. :param errors: The encoding error behavior. :param max_form_memory_size: the maximum number of bytes to be accepted for in-memory stored form data. If the data exceeds the value specified an :exc:`~exceptions.RequestEntityTooLarge` exception is raised. :param max_content_length: If this is provided and the transmitted data is longer than this value an :exc:`~exceptions.RequestEntityTooLarge` exception is raised. :param cls: an optional dict class to use. If this is not specified or `None` the default :class:`MultiDict` is used. :param silent: If set to False parsing errors will not be caught. :return: A tuple in the form ``(stream, form, files)``. 
""" return FormDataParser(stream_factory, charset, errors, max_form_memory_size, max_content_length, cls, silent).parse_from_environ(environ) def exhaust_stream(f): """Helper decorator for methods that exhausts the stream on return.""" def wrapper(self, stream, *args, **kwargs): try: return f(self, stream, *args, **kwargs) finally: exhaust = getattr(stream, 'exhaust', None) if exhaust is not None: exhaust() else: while 1: chunk = stream.read(1024 * 64) if not chunk: break return update_wrapper(wrapper, f) class FormDataParser(object): """This class implements parsing of form data for Werkzeug. By itself it can parse multipart and url encoded form data. It can be subclassed and extended but for most mimetypes it is a better idea to use the untouched stream and expose it as separate attributes on a request object. .. versionadded:: 0.8 :param stream_factory: An optional callable that returns a new read and writeable file descriptor. This callable works the same as :meth:`~BaseResponse._get_file_stream`. :param charset: The character set for URL and url encoded form data. :param errors: The encoding error behavior. :param max_form_memory_size: the maximum number of bytes to be accepted for in-memory stored form data. If the data exceeds the value specified an :exc:`~exceptions.RequestEntityTooLarge` exception is raised. :param max_content_length: If this is provided and the transmitted data is longer than this value an :exc:`~exceptions.RequestEntityTooLarge` exception is raised. :param cls: an optional dict class to use. If this is not specified or `None` the default :class:`MultiDict` is used. :param silent: If set to False parsing errors will not be caught. """ def __init__(self, stream_factory=None, charset='utf-8', errors='replace', max_form_memory_size=None, max_content_length=None, cls=None, silent=True): if stream_factory is None: stream_factory = default_stream_factory self.stream_factory = stream_factory self.charset = charset self.errors = errors self.max_form_memory_size = max_form_memory_size self.max_content_length = max_content_length if cls is None: cls = MultiDict self.cls = cls self.silent = silent def get_parse_func(self, mimetype, options): return self.parse_functions.get(mimetype) def parse_from_environ(self, environ): """Parses the information from the environment as form data. :param environ: the WSGI environment to be used for parsing. :return: A tuple in the form ``(stream, form, files)``. """ content_type = environ.get('CONTENT_TYPE', '') content_length = get_content_length(environ) mimetype, options = parse_options_header(content_type) return self.parse(get_input_stream(environ), mimetype, content_length, options) def parse(self, stream, mimetype, content_length, options=None): """Parses the information from the given stream, mimetype, content length and mimetype parameters. :param stream: an input stream :param mimetype: the mimetype of the data :param content_length: the content length of the incoming data :param options: optional mimetype parameters (used for the multipart boundary for instance) :return: A tuple in the form ``(stream, form, files)``. 
""" if self.max_content_length is not None and \ content_length is not None and \ content_length > self.max_content_length: raise exceptions.RequestEntityTooLarge() if options is None: options = {} parse_func = self.get_parse_func(mimetype, options) if parse_func is not None: try: return parse_func(self, stream, mimetype, content_length, options) except ValueError: if not self.silent: raise return stream, self.cls(), self.cls() @exhaust_stream def _parse_multipart(self, stream, mimetype, content_length, options): parser = MultiPartParser(self.stream_factory, self.charset, self.errors, max_form_memory_size=self.max_form_memory_size, cls=self.cls) boundary = options.get('boundary') if boundary is None: raise ValueError('Missing boundary') if isinstance(boundary, text_type): boundary = boundary.encode('ascii') form, files = parser.parse(stream, boundary, content_length) return stream, form, files @exhaust_stream def _parse_urlencoded(self, stream, mimetype, content_length, options): if self.max_form_memory_size is not None and \ content_length is not None and \ content_length > self.max_form_memory_size: raise exceptions.RequestEntityTooLarge() form = url_decode_stream(stream, self.charset, errors=self.errors, cls=self.cls) return stream, form, self.cls() #: mapping of mimetypes to parsing functions parse_functions = { 'multipart/form-data': _parse_multipart, 'application/x-www-form-urlencoded': _parse_urlencoded, 'application/x-url-encoded': _parse_urlencoded } def is_valid_multipart_boundary(boundary): """Checks if the string given is a valid multipart boundary.""" return _multipart_boundary_re.match(boundary) is not None def _line_parse(line): """Removes line ending characters and returns a tuple (`stripped_line`, `is_terminated`). """ if line[-2:] in ['\r\n', b'\r\n']: return line[:-2], True elif line[-1:] in ['\r', '\n', b'\r', b'\n']: return line[:-1], True return line, False def parse_multipart_headers(iterable): """Parses multipart headers from an iterable that yields lines (including the trailing newline symbol). The iterable has to be newline terminated. The iterable will stop at the line where the headers ended so it can be further consumed. :param iterable: iterable of strings that are newline terminated """ result = [] for line in iterable: line = to_native(line) line, line_terminated = _line_parse(line) if not line_terminated: raise ValueError('unexpected end of line in multipart header') if not line: break elif line[0] in ' \t' and result: key, value = result[-1] result[-1] = (key, value + '\n ' + line[1:]) else: parts = line.split(':', 1) if len(parts) == 2: result.append((parts[0].strip(), parts[1].strip())) # we link the list to the headers, no need to create a copy, the # list was not shared anyways. 
return Headers(result) _begin_form = 'begin_form' _begin_file = 'begin_file' _cont = 'cont' _end = 'end' class MultiPartParser(object): def __init__(self, stream_factory=None, charset='utf-8', errors='replace', max_form_memory_size=None, cls=None, buffer_size=64 * 1024): self.stream_factory = stream_factory self.charset = charset self.errors = errors self.max_form_memory_size = max_form_memory_size if stream_factory is None: stream_factory = default_stream_factory if cls is None: cls = MultiDict self.cls = cls # make sure the buffer size is divisible by four so that we can base64 # decode chunk by chunk assert buffer_size % 4 == 0, 'buffer size has to be divisible by 4' # also the buffer size has to be at least 1024 bytes long or long headers # will freak out the system assert buffer_size >= 1024, 'buffer size has to be at least 1KB' self.buffer_size = buffer_size def _fix_ie_filename(self, filename): """Internet Explorer 6 transmits the full file name if a file is uploaded. This function strips the full path if it thinks the filename is Windows-like absolute. """ if filename[1:3] == ':\\' or filename[:2] == '\\\\': return filename.split('\\')[-1] return filename def _find_terminator(self, iterator): """The terminator might have some additional newlines before it. There is at least one application that sends additional newlines before headers (the python setuptools package). """ for line in iterator: if not line: break line = line.strip() if line: return line return b'' def fail(self, message): raise ValueError(message) def get_part_encoding(self, headers): transfer_encoding = headers.get('content-transfer-encoding') if transfer_encoding is not None and \ transfer_encoding in _supported_multipart_encodings: return transfer_encoding def get_part_charset(self, headers): # Figure out input charset for current part content_type = headers.get('content-type') if content_type: mimetype, ct_params = parse_options_header(content_type) return ct_params.get('charset', self.charset) return self.charset def start_file_streaming(self, filename, headers, total_content_length): if isinstance(filename, bytes): filename = filename.decode(self.charset, self.errors) filename = self._fix_ie_filename(filename) content_type = headers.get('content-type') try: content_length = int(headers['content-length']) except (KeyError, ValueError): content_length = 0 container = self.stream_factory(total_content_length, content_type, filename, content_length) return filename, container def in_memory_threshold_reached(self, bytes): raise exceptions.RequestEntityTooLarge() def validate_boundary(self, boundary): if not boundary: self.fail('Missing boundary') if not is_valid_multipart_boundary(boundary): self.fail('Invalid boundary: %s' % boundary) if len(boundary) > self.buffer_size: # pragma: no cover # this should never happen because we check for a minimum size # of 1024 and boundaries may not be longer than 200. The only # situation when this happens is for non debug builds where # the assert is skipped. 
self.fail('Boundary longer than buffer size') def parse_lines(self, file, boundary, content_length): """Generate parts of ``('begin_form', (headers, name))`` ``('begin_file', (headers, name, filename))`` ``('cont', bytestring)`` ``('end', None)`` Always obeys the grammar parts = ( begin_form cont* end | begin_file cont* end )* """ next_part = b'--' + boundary last_part = next_part + b'--' iterator = chain(make_line_iter(file, limit=content_length, buffer_size=self.buffer_size), _empty_string_iter) terminator = self._find_terminator(iterator) if terminator == last_part: return elif terminator != next_part: self.fail('Expected boundary at start of multipart data') while terminator != last_part: headers = parse_multipart_headers(iterator) disposition = headers.get('content-disposition') if disposition is None: self.fail('Missing Content-Disposition header') disposition, extra = parse_options_header(disposition) transfer_encoding = self.get_part_encoding(headers) name = extra.get('name') filename = extra.get('filename') # if no content type is given we stream into memory. A list is # used as a temporary container. if filename is None: yield _begin_form, (headers, name) # otherwise we parse the rest of the headers and ask the stream # factory for something we can write in. else: yield _begin_file, (headers, name, filename) buf = b'' for line in iterator: if not line: self.fail('unexpected end of stream') if line[:2] == b'--': terminator = line.rstrip() if terminator in (next_part, last_part): break if transfer_encoding is not None: if transfer_encoding == 'base64': transfer_encoding = 'base64_codec' try: line = codecs.decode(line, transfer_encoding) except Exception: self.fail('could not decode transfer encoded chunk') # we have something in the buffer from the last iteration. # this is usually a newline delimiter. if buf: yield _cont, buf buf = b'' # If the line ends with windows CRLF we write everything except # the last two bytes. In all other cases however we write # everything except the last byte. If it was a newline, that's # fine, otherwise it does not matter because we will write it # the next iteration. this ensures we do not write the # final newline into the stream. That way we do not have to # truncate the stream. However we do have to make sure that # if something else than a newline is in there we write it # out. if line[-2:] == b'\r\n': buf = b'\r\n' cutoff = -2 else: buf = line[-1:] cutoff = -1 yield _cont, line[:cutoff] else: # pragma: no cover raise ValueError('unexpected end of part') # if we have a leftover in the buffer that is not a newline # character we have to flush it, otherwise we will chop of # certain values. if buf not in (b'', b'\r', b'\n', b'\r\n'): yield _cont, buf yield _end, None def parse_parts(self, file, boundary, content_length): """Generate ``('file', (name, val))`` and ``('form', (name, val))`` parts. """ in_memory = 0 for ellt, ell in self.parse_lines(file, boundary, content_length): if ellt == _begin_file: headers, name, filename = ell is_file = True guard_memory = False filename, container = self.start_file_streaming( filename, headers, content_length) _write = container.write elif ellt == _begin_form: headers, name = ell is_file = False container = [] _write = container.append guard_memory = self.max_form_memory_size is not None elif ellt == _cont: _write(ell) # if we write into memory and there is a memory size limit we # count the number of bytes in memory and raise an exception if # there is too much data in memory. 
if guard_memory: in_memory += len(ell) if in_memory > self.max_form_memory_size: self.in_memory_threshold_reached(in_memory) elif ellt == _end: if is_file: container.seek(0) yield ('file', (name, FileStorage(container, filename, name, headers=headers))) else: part_charset = self.get_part_charset(headers) yield ('form', (name, b''.join(container).decode( part_charset, self.errors))) def parse(self, file, boundary, content_length): formstream, filestream = tee( self.parse_parts(file, boundary, content_length), 2) form = (p[1] for p in formstream if p[0] == 'form') files = (p[1] for p in filestream if p[0] == 'file') return self.cls(form), self.cls(files) from werkzeug import exceptions
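

# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original Werkzeug module): a minimal,
# hedged example of driving the FormDataParser documented above directly.
# The WSGI environ is hand-built for illustration; in practice it comes from
# the WSGI server.
if __name__ == '__main__':
    from io import BytesIO

    body = b'name=value&other=1'
    environ = {
        'REQUEST_METHOD': 'POST',
        'CONTENT_TYPE': 'application/x-www-form-urlencoded',
        'CONTENT_LENGTH': str(len(body)),
        'wsgi.input': BytesIO(body),
    }
    # FormDataParser is defined earlier in this module; max_form_memory_size
    # caps how many bytes of form data may be buffered in memory.
    parser = FormDataParser(max_form_memory_size=1024 * 1024)
    stream, form, files = parser.parse_from_environ(environ)
    print(form['name'])   # 'value'
    print(len(files))     # 0 -- no file uploads in this request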
gpl-2.0
iut-ibk/DynaMind-UrbanSim
3rdparty/opus/src/urbansim/zone/buildings_SSS_space.py
2
1489
# Opus/UrbanSim urban simulation software. # Copyright (C) 2005-2009 University of Washington # See opus_core/LICENSE from urbansim.gridcell.buildings_SSS_space import buildings_SSS_space as gridcell_buildings_SSS_space class buildings_SSS_space(gridcell_buildings_SSS_space): """Sum of building space of given type across zones.""" id_name = "zone_id" from opus_core.tests import opus_unittest from urbansim.variable_test_toolbox import VariableTestToolbox from numpy import array from numpy import ma class Tests(opus_unittest.OpusTestCase): variable_name = "urbansim.zone.buildings_residential_space" def test_my_inputs(self): """Total number of residential of buildings. """ values = VariableTestToolbox().compute_variable(self.variable_name, data_dictionary = { 'zone':{ 'zone_id':array([1,2,3]), }, 'building': { 'is_building_type_residential':array([1,0,1,0,1,1]), 'zone_id':array([2,3,1,1,2,1]), 'building_size':array([100, 350, 1000, 0, 430, 95]) }, }, dataset = 'zone' ) should_be = array([1095, 530, 0]) self.assert_(ma.allequal(values, should_be), 'Error in ' + self.variable_name) if __name__=='__main__': opus_unittest.main()
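

# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original Opus/UrbanSim source): the plain
# numpy aggregation this variable expresses, written outside the Opus variable
# framework.  The sample arrays mirror test_my_inputs above; the helper name
# is illustrative only.
def _sum_residential_space_by_zone_sketch():
    from numpy import array, zeros
    zone_ids = array([1, 2, 3])
    building_zone_id = array([2, 3, 1, 1, 2, 1])
    is_residential = array([1, 0, 1, 0, 1, 1])
    building_size = array([100, 350, 1000, 0, 430, 95])

    totals = zeros(len(zone_ids), dtype=building_size.dtype)
    for index, zone_id in enumerate(zone_ids):
        in_zone = (building_zone_id == zone_id) & (is_residential == 1)
        totals[index] = building_size[in_zone].sum()
    # array([1095, 530, 0]) -- the `should_be` values in the test above
    return totals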
gpl-2.0
Nikoli/youtube-dl
youtube_dl/extractor/spiegel.py
30
5415
# encoding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..compat import compat_urlparse from .spiegeltv import SpiegeltvIE class SpiegelIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?spiegel\.de/video/[^/]*-(?P<id>[0-9]+)(?:-embed)?(?:\.html)?(?:#.*)?$' _TESTS = [{ 'url': 'http://www.spiegel.de/video/vulkan-tungurahua-in-ecuador-ist-wieder-aktiv-video-1259285.html', 'md5': '2c2754212136f35fb4b19767d242f66e', 'info_dict': { 'id': '1259285', 'ext': 'mp4', 'title': 'Vulkanausbruch in Ecuador: Der "Feuerschlund" ist wieder aktiv', 'description': 'md5:8029d8310232196eb235d27575a8b9f4', 'duration': 49, }, }, { 'url': 'http://www.spiegel.de/video/schach-wm-videoanalyse-des-fuenften-spiels-video-1309159.html', 'md5': 'f2cdf638d7aa47654e251e1aee360af1', 'info_dict': { 'id': '1309159', 'ext': 'mp4', 'title': 'Schach-WM in der Videoanalyse: Carlsen nutzt die Fehlgriffe des Titelverteidigers', 'description': 'md5:c2322b65e58f385a820c10fa03b2d088', 'duration': 983, }, }, { 'url': 'http://www.spiegel.de/video/astronaut-alexander-gerst-von-der-iss-station-beantwortet-fragen-video-1519126-embed.html', 'md5': 'd8eeca6bfc8f1cd6f490eb1f44695d51', 'info_dict': { 'id': '1519126', 'ext': 'mp4', 'description': 'SPIEGEL ONLINE-Nutzer durften den deutschen Astronauten Alexander Gerst über sein Leben auf der ISS-Station befragen. Hier kommen seine Antworten auf die besten sechs Fragen.', 'title': 'Fragen an Astronaut Alexander Gerst: "Bekommen Sie die Tageszeiten mit?"', } }] def _real_extract(self, url): video_id = self._match_id(url) webpage, handle = self._download_webpage_handle(url, video_id) # 302 to spiegel.tv, like http://www.spiegel.de/video/der-film-zum-wochenende-die-wahrheit-ueber-maenner-video-99003272.html if SpiegeltvIE.suitable(handle.geturl()): return self.url_result(handle.geturl(), 'Spiegeltv') title = re.sub(r'\s+', ' ', self._html_search_regex( r'(?s)<(?:h1|div) class="module-title"[^>]*>(.*?)</(?:h1|div)>', webpage, 'title')) description = self._html_search_meta('description', webpage, 'description') base_url = self._search_regex( r'var\s+server\s*=\s*"([^"]+)\"', webpage, 'server URL') xml_url = base_url + video_id + '.xml' idoc = self._download_xml(xml_url, video_id) formats = [] for n in list(idoc): if n.tag.startswith('type') and n.tag != 'type6': format_id = n.tag.rpartition('type')[2] video_url = base_url + n.find('./filename').text formats.append({ 'format_id': format_id, 'url': video_url, 'width': int(n.find('./width').text), 'height': int(n.find('./height').text), 'abr': int(n.find('./audiobitrate').text), 'vbr': int(n.find('./videobitrate').text), 'vcodec': n.find('./codec').text, 'acodec': 'MP4A', }) duration = float(idoc[0].findall('./duration')[0].text) self._check_formats(formats, video_id) self._sort_formats(formats) return { 'id': video_id, 'title': title, 'description': description, 'duration': duration, 'formats': formats, } class SpiegelArticleIE(InfoExtractor): _VALID_URL = 'https?://www\.spiegel\.de/(?!video/)[^?#]*?-(?P<id>[0-9]+)\.html' IE_NAME = 'Spiegel:Article' IE_DESC = 'Articles on spiegel.de' _TESTS = [{ 'url': 'http://www.spiegel.de/sport/sonst/badminton-wm-die-randsportart-soll-populaerer-werden-a-987092.html', 'info_dict': { 'id': '1516455', 'ext': 'mp4', 'title': 'Faszination Badminton: Nennt es bloß nicht Federball', 'description': 're:^Patrick Kämnitz gehört.{100,}', }, }, { 'url': 'http://www.spiegel.de/wissenschaft/weltall/astronaut-alexander-gerst-antwortet-spiegel-online-lesern-a-989876.html', 
'info_dict': { }, 'playlist_count': 6, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) # Single video on top of the page video_link = self._search_regex( r'<a href="([^"]+)" onclick="return spOpenVideo\(this,', webpage, 'video page URL', default=None) if video_link: video_url = compat_urlparse.urljoin( self.http_scheme() + '//spiegel.de/', video_link) return self.url_result(video_url) # Multiple embedded videos embeds = re.findall( r'<div class="vid_holder[0-9]+.*?</div>\s*.*?url\s*=\s*"([^"]+)"', webpage) entries = [ self.url_result(compat_urlparse.urljoin( self.http_scheme() + '//spiegel.de/', embed_path)) for embed_path in embeds ] return self.playlist_result(entries)
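

# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original youtube-dl source): the shape of
# per-format XML nodes that SpiegelIE._real_extract above iterates over.  The
# sample document and URL are invented for illustration; only the element
# names (type*, filename, width, height, audiobitrate, videobitrate, codec)
# are taken from the extractor code.
if __name__ == '__main__':
    import xml.etree.ElementTree as ET

    sample = """<video>
      <type1>
        <filename>1259285_v1.mp4</filename>
        <width>480</width><height>270</height>
        <audiobitrate>96</audiobitrate><videobitrate>500</videobitrate>
        <codec>h264</codec>
      </type1>
    </video>"""
    idoc = ET.fromstring(sample)
    formats = []
    for n in list(idoc):
        if n.tag.startswith('type') and n.tag != 'type6':
            formats.append({
                'format_id': n.tag.rpartition('type')[2],
                'url': 'http://example.invalid/' + n.find('./filename').text,
                'width': int(n.find('./width').text),
                'height': int(n.find('./height').text),
                'vcodec': n.find('./codec').text,
            })
    print(formats)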
unlicense
Lujeni/ansible
lib/ansible/modules/network/f5/bigip_monitor_tcp_half_open.py
38
19942
#!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright: (c) 2017, F5 Networks Inc. # GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'certified'} DOCUMENTATION = r''' --- module: bigip_monitor_tcp_half_open short_description: Manages F5 BIG-IP LTM tcp half-open monitors description: Manages F5 BIG-IP LTM tcp half-open monitors. version_added: 2.4 options: name: description: - Monitor name. type: str required: True parent: description: - The parent template of this monitor template. Once this value has been set, it cannot be changed. By default, this value is the C(tcp_half_open) parent on the C(Common) partition. type: str default: /Common/tcp_half_open description: description: - The description of the monitor. type: str version_added: 2.7 ip: description: - IP address part of the IP/port definition. If this parameter is not provided when creating a new monitor, then the default value will be '*'. - If this value is an IP address, and the C(type) is C(tcp) (the default), then a C(port) number must be specified. type: str port: description: - Port address part of the IP/port definition. If this parameter is not provided when creating a new monitor, then the default value will be '*'. Note that if specifying an IP address, a value between 1 and 65535 must be specified type: str version_added: 2.5 interval: description: - The interval specifying how frequently the monitor instance of this template will run. If this parameter is not provided when creating a new monitor, then the default value will be 5. This value B(must) be less than the C(timeout) value. type: int timeout: description: - The number of seconds in which the node or service must respond to the monitor request. If the target responds within the set time period, it is considered up. If the target does not respond within the set time period, it is considered down. You can change this number to any number you want, however, it should be 3 times the interval number of seconds plus 1 second. If this parameter is not provided when creating a new monitor, then the default value will be 16. type: int time_until_up: description: - Specifies the amount of time in seconds after the first successful response before a node will be marked up. A value of 0 will cause a node to be marked up immediately after a valid response is received from the node. If this parameter is not provided when creating a new monitor, then the default value will be 0. type: int partition: description: - Device partition to manage resources on. type: str default: Common version_added: 2.5 state: description: - When C(present), ensures that the monitor exists. - When C(absent), ensures the monitor is removed. 
type: str choices: - present - absent default: present version_added: 2.5 notes: - Requires BIG-IP software version >= 12 extends_documentation_fragment: f5 author: - Tim Rupp (@caphrim007) - Wojciech Wypior (@wojtek0806) ''' EXAMPLES = r''' - name: Create TCP half-open Monitor bigip_monitor_tcp_half_open: state: present ip: 10.10.10.10 name: my_tcp_monitor provider: server: lb.mydomain.com user: admin password: secret delegate_to: localhost - name: Remove TCP half-open Monitor bigip_monitor_tcp_half_open: state: absent name: my_tcp_monitor provider: server: lb.mydomain.com user: admin password: secret delegate_to: localhost - name: Add half-open monitor for all addresses, port 514 bigip_monitor_tcp_half_open: port: 514 name: my_tcp_monitor provider: password: secret server: lb.mydomain.com user: admin delegate_to: localhost ''' RETURN = r''' parent: description: New parent template of the monitor. returned: changed type: str sample: tcp description: description: The description of the monitor. returned: changed type: str sample: Important Monitor ip: description: The new IP of IP/port definition. returned: changed type: str sample: 10.12.13.14 interval: description: The new interval in which to run the monitor check. returned: changed type: int sample: 2 timeout: description: The new timeout in which the remote system must respond to the monitor. returned: changed type: int sample: 10 time_until_up: description: The new time in which to mark a system as up after first successful response. returned: changed type: int sample: 2 ''' import os from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.basic import env_fallback try: from library.module_utils.network.f5.bigip import F5RestClient from library.module_utils.network.f5.common import F5ModuleError from library.module_utils.network.f5.common import AnsibleF5Parameters from library.module_utils.network.f5.common import f5_argument_spec from library.module_utils.network.f5.common import transform_name from library.module_utils.network.f5.ipaddress import is_valid_ip from library.module_utils.network.f5.compare import cmp_str_with_none except ImportError: from ansible.module_utils.network.f5.bigip import F5RestClient from ansible.module_utils.network.f5.common import F5ModuleError from ansible.module_utils.network.f5.common import AnsibleF5Parameters from ansible.module_utils.network.f5.common import f5_argument_spec from ansible.module_utils.network.f5.common import transform_name from ansible.module_utils.network.f5.ipaddress import is_valid_ip from ansible.module_utils.network.f5.compare import cmp_str_with_none class Parameters(AnsibleF5Parameters): api_map = { 'timeUntilUp': 'time_until_up', 'defaultsFrom': 'parent', 'recv': 'receive', } api_attributes = [ 'timeUntilUp', 'defaultsFrom', 'interval', 'timeout', 'destination', 'description', ] returnables = [ 'parent', 'ip', 'port', 'interval', 'timeout', 'time_until_up', 'description', ] updatables = [ 'destination', 'interval', 'timeout', 'time_until_up', 'description', ] @property def destination(self): if self.ip is None and self.port is None: return None destination = '{0}:{1}'.format(self.ip, self.port) return destination @destination.setter def destination(self, value): ip, port = value.split(':') self._values['ip'] = ip self._values['port'] = port @property def interval(self): if self._values['interval'] is None: return None # Per BZ617284, the BIG-IP UI does not raise a warning about this. # So I raise the error instead. 
if 1 > int(self._values['interval']) > 86400: raise F5ModuleError( "Interval value must be between 1 and 86400" ) return int(self._values['interval']) @property def timeout(self): if self._values['timeout'] is None: return None return int(self._values['timeout']) @property def ip(self): if self._values['ip'] is None: return None elif self._values['ip'] in ['*', '0.0.0.0']: return '*' elif is_valid_ip(self._values['ip']): return self._values['ip'] raise F5ModuleError( "The provided 'ip' parameter is not an IP address." ) @property def port(self): if self._values['port'] is None: return None elif self._values['port'] == '*': return '*' return int(self._values['port']) @property def time_until_up(self): if self._values['time_until_up'] is None: return None return int(self._values['time_until_up']) @property def parent(self): if self._values['parent'] is None: return None if self._values['parent'].startswith('/'): parent = os.path.basename(self._values['parent']) result = '/{0}/{1}'.format(self.partition, parent) else: result = '/{0}/{1}'.format(self.partition, self._values['parent']) return result @property def type(self): return 'tcp_half_open' class ApiParameters(Parameters): @property def description(self): if self._values['description'] in [None, 'none']: return None return self._values['description'] class ModuleParameters(Parameters): @property def description(self): if self._values['description'] is None: return None elif self._values['description'] in ['none', '']: return '' return self._values['description'] class Changes(Parameters): def to_return(self): result = {} try: for returnable in self.returnables: result[returnable] = getattr(self, returnable) result = self._filter_params(result) except Exception: pass return result class UsableChanges(Changes): pass class ReportableChanges(Changes): pass class Difference(object): def __init__(self, want, have=None): self.want = want self.have = have def compare(self, param): try: result = getattr(self, param) return result except AttributeError: result = self.__default(param) return result @property def parent(self): if self.want.parent != self.have.parent: raise F5ModuleError( "The parent monitor cannot be changed" ) @property def destination(self): if self.want.ip is None and self.want.port is None: return None if self.want.port is None: self.want.update({'port': self.have.port}) if self.want.ip is None: self.want.update({'ip': self.have.ip}) if self.want.port in [None, '*'] and self.want.ip != '*': raise F5ModuleError( "Specifying an IP address requires that a port number be specified" ) if self.want.destination != self.have.destination: return self.want.destination @property def interval(self): if self.want.timeout is not None and self.want.interval is not None: if self.want.interval >= self.want.timeout: raise F5ModuleError( "Parameter 'interval' must be less than 'timeout'." ) elif self.want.timeout is not None: if self.have.interval >= self.want.timeout: raise F5ModuleError( "Parameter 'interval' must be less than 'timeout'." ) elif self.want.interval is not None: if self.want.interval >= self.have.timeout: raise F5ModuleError( "Parameter 'interval' must be less than 'timeout'." 
) if self.want.interval != self.have.interval: return self.want.interval def __default(self, param): attr1 = getattr(self.want, param) try: attr2 = getattr(self.have, param) if attr1 != attr2: return attr1 except AttributeError: return attr1 @property def description(self): return cmp_str_with_none(self.want.description, self.have.description) class ModuleManager(object): def __init__(self, *args, **kwargs): self.module = kwargs.get('module', None) self.client = F5RestClient(**self.module.params) self.want = ModuleParameters(params=self.module.params) self.have = ApiParameters() self.changes = UsableChanges() def _set_changed_options(self): changed = {} for key in Parameters.returnables: if getattr(self.want, key) is not None: changed[key] = getattr(self.want, key) if changed: self.changes = UsableChanges(params=changed) def _update_changed_options(self): diff = Difference(self.want, self.have) updatables = Parameters.updatables changed = dict() for k in updatables: change = diff.compare(k) if change is None: continue else: if isinstance(change, dict): changed.update(change) else: changed[k] = change if changed: self.changes = UsableChanges(params=changed) return True return False def should_update(self): result = self._update_changed_options() if result: return True return False def exec_module(self): changed = False result = dict() state = self.want.state if state == "present": changed = self.present() elif state == "absent": changed = self.absent() reportable = ReportableChanges(params=self.changes.to_return()) changes = reportable.to_return() result.update(**changes) result.update(dict(changed=changed)) self._announce_deprecations(result) return result def _announce_deprecations(self, result): warnings = result.pop('__warnings', []) for warning in warnings: self.client.module.deprecate( msg=warning['msg'], version=warning['version'] ) def present(self): if self.exists(): return self.update() else: return self.create() def exists(self): uri = "https://{0}:{1}/mgmt/tm/ltm/monitor/tcp-half-open/{2}".format( self.client.provider['server'], self.client.provider['server_port'], transform_name(self.want.partition, self.want.name) ) resp = self.client.api.get(uri) try: response = resp.json() except ValueError: return False if resp.status == 404 or 'code' in response and response['code'] == 404: return False return True def update(self): self.have = self.read_current_from_device() if not self.should_update(): return False if self.module.check_mode: return True self.update_on_device() return True def remove(self): if self.module.check_mode: return True self.remove_from_device() if self.exists(): raise F5ModuleError("Failed to delete the resource.") return True def create(self): self._set_changed_options() self._set_default_creation_values() if self.module.check_mode: return True self.create_on_device() return True def _set_default_creation_values(self): if self.want.timeout is None: self.want.update({'timeout': 16}) if self.want.interval is None: self.want.update({'interval': 5}) if self.want.time_until_up is None: self.want.update({'time_until_up': 0}) if self.want.ip is None: self.want.update({'ip': '*'}) if self.want.port is None: self.want.update({'port': '*'}) def create_on_device(self): params = self.changes.api_params() params['name'] = self.want.name params['partition'] = self.want.partition uri = "https://{0}:{1}/mgmt/tm/ltm/monitor/tcp-half-open/".format( self.client.provider['server'], self.client.provider['server_port'] ) resp = self.client.api.post(uri, json=params) try: response = 
resp.json() except ValueError as ex: raise F5ModuleError(str(ex)) if 'code' in response and response['code'] in [400, 403]: if 'message' in response: raise F5ModuleError(response['message']) else: raise F5ModuleError(resp.content) def update_on_device(self): params = self.changes.api_params() uri = "https://{0}:{1}/mgmt/tm/ltm/monitor/tcp-half-open/{2}".format( self.client.provider['server'], self.client.provider['server_port'], transform_name(self.want.partition, self.want.name) ) resp = self.client.api.patch(uri, json=params) try: response = resp.json() except ValueError as ex: raise F5ModuleError(str(ex)) if 'code' in response and response['code'] == 400: if 'message' in response: raise F5ModuleError(response['message']) else: raise F5ModuleError(resp.content) def absent(self): if self.exists(): return self.remove() return False def remove_from_device(self): uri = "https://{0}:{1}/mgmt/tm/ltm/monitor/tcp-half-open/{2}".format( self.client.provider['server'], self.client.provider['server_port'], transform_name(self.want.partition, self.want.name) ) resp = self.client.api.delete(uri) if resp.status == 200: return True def read_current_from_device(self): uri = "https://{0}:{1}/mgmt/tm/ltm/monitor/tcp-half-open/{2}".format( self.client.provider['server'], self.client.provider['server_port'], transform_name(self.want.partition, self.want.name) ) resp = self.client.api.get(uri) try: response = resp.json() except ValueError as ex: raise F5ModuleError(str(ex)) if 'code' in response and response['code'] == 400: if 'message' in response: raise F5ModuleError(response['message']) else: raise F5ModuleError(resp.content) return ApiParameters(params=response) class ArgumentSpec(object): def __init__(self): self.supports_check_mode = True argument_spec = dict( name=dict(required=True), parent=dict(default='/Common/tcp_half_open'), description=dict(), ip=dict(), port=dict(), interval=dict(type='int'), timeout=dict(type='int'), time_until_up=dict(type='int'), state=dict( default='present', choices=['present', 'absent'] ), partition=dict( default='Common', fallback=(env_fallback, ['F5_PARTITION']) ) ) self.argument_spec = {} self.argument_spec.update(f5_argument_spec) self.argument_spec.update(argument_spec) def main(): spec = ArgumentSpec() module = AnsibleModule( argument_spec=spec.argument_spec, supports_check_mode=spec.supports_check_mode, ) try: mm = ModuleManager(module=module) results = mm.exec_module() module.exit_json(**results) except F5ModuleError as ex: module.fail_json(msg=str(ex)) if __name__ == '__main__': main()
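

# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original Ansible module): two tiny helpers
# illustrating behaviour documented above -- how ``ip`` and ``port`` combine
# into the monitor ``destination`` string (Parameters.destination), with the
# '*' defaults applied at creation time (_set_default_creation_values), and
# the "timeout should be 3 times the interval plus 1 second" guidance from
# the option docs.  Helper names and sample values are illustrative only.
def _destination_sketch(ip=None, port=None):
    # Unset halves fall back to '*', e.g. '*:*', '*:514' or '10.10.10.10:443'.
    return '{0}:{1}'.format('*' if ip is None else ip,
                            '*' if port is None else port)


def _suggested_timeout_sketch(interval):
    # Rule of thumb from the documentation; the module defaults of
    # interval=5 and timeout=16 follow it (3 * 5 + 1 == 16).
    return 3 * interval + 1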
gpl-3.0
boneyao/sentry
src/sentry/migrations/0172_auto__del_field_team_owner.py
34
38017
# -*- coding: utf-8 -*- from south.utils import datetime_utils as datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Deleting field 'Team.owner' db.delete_column(u'sentry_team', 'owner_id') def backwards(self, orm): # Adding field 'Team.owner' db.add_column(u'sentry_team', 'owner', self.gf('sentry.db.models.fields.foreignkey.FlexibleForeignKey')(to=orm['sentry.User'], null=True), keep_default=False) models = { 'sentry.accessgroup': { 'Meta': {'unique_together': "(('team', 'name'),)", 'object_name': 'AccessGroup'}, 'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}), 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'managed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'members': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.User']", 'symmetrical': 'False'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'projects': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Project']", 'symmetrical': 'False'}), 'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"}), 'type': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '50'}) }, 'sentry.activity': { 'Meta': {'object_name': 'Activity'}, 'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True'}), 'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'event': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Event']", 'null': 'True'}), 'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'ident': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}), 'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}), 'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'}) }, 'sentry.alert': { 'Meta': {'object_name': 'Alert'}, 'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True'}), 'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'message': ('django.db.models.fields.TextField', [], {}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}), 'related_groups': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'related_alerts'", 'symmetrical': 'False', 'through': "orm['sentry.AlertRelatedGroup']", 'to': "orm['sentry.Group']"}), 'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}) }, 'sentry.alertrelatedgroup': { 'Meta': {'unique_together': "(('group', 'alert'),)", 'object_name': 'AlertRelatedGroup'}, 
'alert': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Alert']"}), 'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True'}), 'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}) }, 'sentry.apikey': { 'Meta': {'object_name': 'ApiKey'}, 'allowed_origins': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}), 'label': ('django.db.models.fields.CharField', [], {'default': "'Default'", 'max_length': '64', 'blank': 'True'}), 'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Organization']"}), 'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}), 'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}) }, 'sentry.auditlogentry': { 'Meta': {'object_name': 'AuditLogEntry'}, 'actor': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'audit_actors'", 'to': "orm['sentry.User']"}), 'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}), 'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'event': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}), 'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}), 'target_object': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}), 'target_user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'audit_targets'", 'null': 'True', 'to': "orm['sentry.User']"}) }, 'sentry.authidentity': { 'Meta': {'unique_together': "(('auth_provider', 'ident'), ('auth_provider', 'user'))", 'object_name': 'AuthIdentity'}, 'auth_provider': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.AuthProvider']"}), 'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}), 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'ident': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'last_synced': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_verified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}) }, 'sentry.authprovider': { 'Meta': {'object_name': 'AuthProvider'}, 'config': ('jsonfield.fields.JSONField', [], {'default': '{}'}), 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'default_global_access': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 
'default_role': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50'}), 'default_teams': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Team']", 'symmetrical': 'False', 'blank': 'True'}), 'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'last_sync': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}), 'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']", 'unique': 'True'}), 'provider': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'sync_time': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}) }, 'sentry.broadcast': { 'Meta': {'object_name': 'Broadcast'}, 'badge': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}), 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}), 'link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}), 'message': ('django.db.models.fields.CharField', [], {'max_length': '256'}) }, 'sentry.event': { 'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'", 'index_together': "(('group', 'datetime'),)"}, 'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}), 'data': ('sentry.db.models.fields.node.NodeField', [], {'null': 'True', 'blank': 'True'}), 'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}), 'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}), 'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'event_set'", 'null': 'True', 'to': "orm['sentry.Group']"}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'message': ('django.db.models.fields.TextField', [], {}), 'num_comments': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'null': 'True'}), 'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}), 'time_spent': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'null': 'True'}) }, 'sentry.eventmapping': { 'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'EventMapping'}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}) }, 'sentry.file': { 'Meta': {'object_name': 'File'}, 'checksum': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True'}), 'headers': 
('jsonfield.fields.JSONField', [], {'default': '{}'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'path': ('django.db.models.fields.TextField', [], {'null': 'True'}), 'size': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}), 'storage': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}), 'storage_options': ('jsonfield.fields.JSONField', [], {'default': '{}'}), 'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}), 'type': ('django.db.models.fields.CharField', [], {'max_length': '64'}) }, 'sentry.group': { 'Meta': {'unique_together': "(('project', 'checksum'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'"}, 'active_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}), 'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}), 'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}), 'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}), 'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'is_public': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}), 'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}), 'level': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}), 'logger': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}), 'message': ('django.db.models.fields.TextField', [], {}), 'num_comments': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'null': 'True'}), 'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}), 'resolved_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}), 'score': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}), 'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}), 'time_spent_count': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}), 'time_spent_total': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}), 'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '1', 'db_index': 'True'}) }, 'sentry.groupassignee': { 'Meta': {'object_name': 'GroupAssignee', 'db_table': "'sentry_groupasignee'"}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'assignee_set'", 'unique': 'True', 'to': "orm['sentry.Group']"}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'project': 
('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'assignee_set'", 'to': "orm['sentry.Project']"}), 'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'sentry_assignee_set'", 'to': "orm['sentry.User']"}) }, 'sentry.groupbookmark': { 'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'}, 'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Group']"}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Project']"}), 'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'sentry_bookmark_set'", 'to': "orm['sentry.User']"}) }, 'sentry.grouphash': { 'Meta': {'unique_together': "(('project', 'hash'),)", 'object_name': 'GroupHash'}, 'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}), 'hash': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}) }, 'sentry.groupmeta': { 'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'}, 'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'value': ('django.db.models.fields.TextField', [], {}) }, 'sentry.grouprulestatus': { 'Meta': {'unique_together': "(('rule', 'group'),)", 'object_name': 'GroupRuleStatus'}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'last_active': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}), 'rule': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Rule']"}), 'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}) }, 'sentry.groupseen': { 'Meta': {'unique_together': "(('user', 'group'),)", 'object_name': 'GroupSeen'}, 'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}), 'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'db_index': 'False'}) }, 'sentry.grouptagkey': { 'Meta': {'unique_together': "(('project', 'group', 'key'),)", 'object_name': 'GroupTagKey'}, 'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}), 'id': 
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}), 'values_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}) }, 'sentry.grouptagvalue': { 'Meta': {'unique_together': "(('project', 'key', 'value', 'group'),)", 'object_name': 'GroupTagValue', 'db_table': "'sentry_messagefiltervalue'"}, 'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}), 'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'grouptag'", 'to': "orm['sentry.Group']"}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'grouptag'", 'null': 'True', 'to': "orm['sentry.Project']"}), 'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}), 'value': ('django.db.models.fields.CharField', [], {'max_length': '200'}) }, 'sentry.helppage': { 'Meta': {'object_name': 'HelpPage'}, 'content': ('django.db.models.fields.TextField', [], {}), 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'is_visible': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '64', 'unique': 'True', 'null': 'True'}), 'priority': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '64'}) }, 'sentry.lostpasswordhash': { 'Meta': {'object_name': 'LostPasswordHash'}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'unique': 'True'}) }, 'sentry.option': { 'Meta': {'object_name': 'Option'}, 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}), 'last_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {}) }, 'sentry.organization': { 'Meta': {'object_name': 'Organization'}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'org_memberships'", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationMember']", 'to': "orm['sentry.User']"}), 'name': 
('django.db.models.fields.CharField', [], {'max_length': '64'}), 'owner': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}), 'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}), 'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}) }, 'sentry.organizationaccessrequest': { 'Meta': {'unique_together': "(('team', 'member'),)", 'object_name': 'OrganizationAccessRequest'}, 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'member': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.OrganizationMember']"}), 'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"}) }, 'sentry.organizationmember': { 'Meta': {'unique_together': "(('organization', 'user'), ('organization', 'email'))", 'object_name': 'OrganizationMember'}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}), 'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}), 'has_global_access': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Organization']"}), 'teams': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Team']", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationMemberTeam']", 'blank': 'True'}), 'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50'}), 'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'sentry_orgmember_set'", 'null': 'True', 'to': "orm['sentry.User']"}) }, 'sentry.organizationmemberteam': { 'Meta': {'unique_together': "(('team', 'organizationmember'),)", 'object_name': 'OrganizationMemberTeam', 'db_table': "'sentry_organizationmember_teams'"}, 'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'organizationmember': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.OrganizationMember']"}), 'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"}) }, 'sentry.pendingteammember': { 'Meta': {'unique_together': "(('team', 'email'),)", 'object_name': 'PendingTeamMember'}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'pending_member_set'", 'to': "orm['sentry.Team']"}), 'type': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '50'}) }, 'sentry.project': { 'Meta': {'unique_together': "(('team', 'slug'), ('organization', 'slug'))", 'object_name': 'Project'}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}), 'platform': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}), 'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True'}), 'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}), 'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"}) }, 'sentry.projectkey': { 'Meta': {'object_name': 'ProjectKey'}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Project']"}), 'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}), 'roles': ('django.db.models.fields.BigIntegerField', [], {'default': '1'}), 'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}), 'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}), 'user_added': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'keys_added_set'", 'null': 'True', 'to': "orm['sentry.User']"}) }, 'sentry.projectoption': { 'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"}, 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}), 'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {}) }, 'sentry.release': { 'Meta': {'unique_together': "(('project', 'version'),)", 'object_name': 'Release'}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}), 'version': ('django.db.models.fields.CharField', [], {'max_length': '64'}) }, 'sentry.releasefile': { 'Meta': {'unique_together': "(('release', 'ident'),)", 'object_name': 'ReleaseFile'}, 'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'ident': ('django.db.models.fields.CharField', [], {'max_length': '40'}), 'name': ('django.db.models.fields.TextField', [], {}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}), 'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"}) }, 'sentry.rule': { 'Meta': {'object_name': 'Rule'}, 'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}), 'date_added': 
('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'label': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}) }, 'sentry.tagkey': { 'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'TagKey', 'db_table': "'sentry_filterkey'"}, 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}), 'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}), 'values_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}) }, 'sentry.tagvalue': { 'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'TagValue', 'db_table': "'sentry_filtervalue'"}, 'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}), 'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}), 'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}), 'value': ('django.db.models.fields.CharField', [], {'max_length': '200'}) }, 'sentry.team': { 'Meta': {'unique_together': "(('organization', 'slug'),)", 'object_name': 'Team'}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}), 'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}), 'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}) }, 'sentry.teammember': { 'Meta': {'unique_together': "(('team', 'user'),)", 'object_name': 'TeamMember'}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"}), 'type': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '50'}), 'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}) }, 'sentry.user': { 'Meta': {'object_name': 'User', 'db_table': "'auth_user'"}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 
'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_managed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}) }, 'sentry.useroption': { 'Meta': {'unique_together': "(('user', 'project', 'key'),)", 'object_name': 'UserOption'}, 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}), 'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}), 'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {}) } } complete_apps = ['sentry']
bsd-3-clause
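The record above ends with South's frozen ORM: a plain dict pinning every model and field as they looked when the migration was written, plus complete_apps. A minimal sketch of how a data migration would consume such a frozen dict — the slug-backfill logic is hypothetical, but the model and field names come from the frozen definitions above:

# Sketch only: a South data migration working against a frozen ORM like the one above.
from south.v2 import DataMigration

class Migration(DataMigration):

    def forwards(self, orm):
        # orm['app.Model'] resolves against the frozen 'models' dict,
        # not the live models.py, so the migration stays reproducible.
        for team in orm['sentry.Team'].objects.filter(slug=''):
            team.slug = 'team-%d' % team.id
            team.save()

    def backwards(self, orm):
        pass

    # models = <the frozen dict shown in the record above>
    complete_apps = ['sentry']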
B-MOOC/edx-platform
common/djangoapps/util/sandboxing.py
162
1617
import re from django.conf import settings # We'll make assets named this be importable by Python code in the sandbox. PYTHON_LIB_ZIP = "python_lib.zip" def can_execute_unsafe_code(course_id): """ Determine if this course is allowed to run unsafe code. For use from the ModuleStore. Checks the `course_id` against a list of whitelisted regexes. Returns a boolean, true if the course can run outside the sandbox. """ # To decide if we can run unsafe code, we check the course id against # a list of regexes configured on the server. # If this is not defined in the environment variables then default to the most restrictive, which # is 'no unsafe courses' # TODO: This should be a database configuration, where we can mark individual courses as being # safe/unsafe. Someone in the future should switch us over to that rather than using regexes # in a settings file # To others using this: the code as-is is brittle and likely to be changed in the future, # as per the TODO, so please consider carefully before adding more values to COURSES_WITH_UNSAFE_CODE for regex in getattr(settings, 'COURSES_WITH_UNSAFE_CODE', []): if re.match(regex, unicode(course_id)): return True return False def get_python_lib_zip(contentstore, course_id): """Return the bytes of the python_lib.zip file, if any.""" asset_key = course_id.make_asset_key("asset", PYTHON_LIB_ZIP) zip_lib = contentstore().find(asset_key, throw_on_not_found=False) if zip_lib is not None: return zip_lib.data else: return None
agpl-3.0
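The whitelist check in sandboxing.py above is a regex scan over settings.COURSES_WITH_UNSAFE_CODE. A standalone illustration of the same rule — the patterns and course ids here are made up; in edx-platform the list comes from Django settings:

import re

# Hypothetical whitelist entry; edx-platform reads this from settings.
COURSES_WITH_UNSAFE_CODE = [r'MITx/6\.00\.1x/.*']

def can_execute_unsafe_code(course_id):
    # Same rule as the module above: unsafe code runs only for whitelisted ids.
    return any(re.match(rx, course_id) for rx in COURSES_WITH_UNSAFE_CODE)

print(can_execute_unsafe_code('MITx/6.00.1x/2015_Spring'))  # True
print(can_execute_unsafe_code('HarvardX/CS50x/2014'))       # False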
necula01/bond
pybond/tutorials/binary_search_tree/bst.py
1
3646
# A tutorial on binary-search-trees, by Laurent Luce <http://www.laurentluce.com/> # Code from: http://www.laurentluce.com/posts/binary-search-tree-library-in-python/ # # We use this code as the system-under-test class Node: """ Tree node: left and right child + data which can be any object """ def __init__(self, data): """ Node constructor @param data node data object """ self.left = None self.right = None self.data = data def insert(self, data): """ Insert new node with data @param data node data object to insert """ if self.data: if data < self.data: if self.left is None: self.left = Node(data) else: self.left.insert(data) elif data > self.data: if self.right is None: self.right = Node(data) else: self.right.insert(data) else: self.data = data def lookup(self, data, parent=None): """ Lookup node containing data @param data node data object to look up @param parent node's parent @returns node and node's parent if found or None, None """ if data < self.data: if self.left is None: return None, None return self.left.lookup(data, self) elif data > self.data: if self.right is None: return None, None return self.right.lookup(data, self) else: return self, parent def delete(self, data): """ Delete node containing data @param data node's content to delete """ # get node containing data node, parent = self.lookup(data) if node is not None: children_count = node.children_count() else: return None if children_count == 0: # if node has no children, just remove it if parent: if parent.left is node: parent.left = None else: parent.right = None del node else: self.data = None elif children_count == 1: # if node has 1 child # replace node with its child if node.left: n = node.left else: n = node.right if parent: if parent.left is node: parent.left = n else: parent.right = n del node else: self.left = n.left self.right = n.right self.data = n.data else: # if node has 2 children # find its successor parent = node successor = node.right while successor.left: parent = successor successor = successor.left # replace node data by its successor data node.data = successor.data # fix successor's parent's child if parent.left == successor: parent.left = successor.right else: parent.right = successor.right def children_count(self): """ Returns the number of children @returns number of children: 0, 1, 2 """ cnt = 0 if self.left: cnt += 1 if self.right: cnt += 1 return cnt
bsd-2-clause
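A short usage sketch for the Node class in the record above, assuming the file is importable as bst (per the record's path):

from bst import Node

root = Node(8)
for value in (3, 10, 1, 6, 14, 4, 7, 13):
    root.insert(value)

node, parent = root.lookup(6)
print(node.data)              # 6
print(parent.data)            # 3
print(node.children_count())  # 2  (children are 4 and 7)

root.delete(3)                # two children: 3 is replaced by its successor 4
print(root.lookup(3))         # (None, None) -- the value is gone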
Sbalbp/DIRAC
DataManagementSystem/Agent/RequestOperations/RegisterFile.py
2
3590
######################################################################## # File: RegisterOperation.py # Author: [email protected] # Date: 2013/03/19 13:55:14 ######################################################################## """ :mod: RegisterFile ================== .. module: RegisterFile :synopsis: register operation handler .. moduleauthor:: [email protected] RegisterFile operation handler """ __RCSID__ = "$Id $" # # # @file RegisterOperation.py # @author [email protected] # @date 2013/03/19 13:55:24 # @brief Definition of RegisterOperation class. # # imports from DIRAC import S_OK, S_ERROR from DIRAC.FrameworkSystem.Client.MonitoringClient import gMonitor from DIRAC.RequestManagementSystem.private.OperationHandlerBase import OperationHandlerBase from DIRAC.DataManagementSystem.Client.DataManager import DataManager ######################################################################## class RegisterFile( OperationHandlerBase ): """ .. class:: RegisterOperation RegisterFile operation handler """ def __init__( self, operation = None, csPath = None ): """c'tor :param self: self reference :param Operation operation: Operation instance :param str csPath: CS path for this handler """ OperationHandlerBase.__init__( self, operation, csPath ) # # RegisterFile specific monitor info gMonitor.registerActivity( "RegisterAtt", "Attempted file registrations", "RequestExecutingAgent", "Files/min", gMonitor.OP_SUM ) gMonitor.registerActivity( "RegisterOK", "Successful file registrations", "RequestExecutingAgent", "Files/min", gMonitor.OP_SUM ) gMonitor.registerActivity( "RegisterFail", "Failed file registrations", "RequestExecutingAgent", "Files/min", gMonitor.OP_SUM ) def __call__( self ): """ call me maybe """ # # counter for failed files failedFiles = 0 # # catalog to use catalog = self.operation.Catalog dm = DataManager( catalogs = catalog ) # # get waiting files waitingFiles = self.getWaitingFilesList() # # loop over files for opFile in waitingFiles: gMonitor.addMark( "RegisterAtt", 1 ) # # get LFN lfn = opFile.LFN # # and others fileTuple = ( lfn , opFile.PFN, opFile.Size, self.operation.targetSEList[0], opFile.GUID, opFile.Checksum ) # # call DataManager registerFile = dm.registerFile( fileTuple ) # # check results if not registerFile["OK"] or lfn in registerFile["Value"]["Failed"]: gMonitor.addMark( "RegisterFail", 1 ) # self.dataLoggingClient().addFileRecord( lfn, "RegisterFail", catalog, "", "RegisterFile" ) reason = registerFile.get( "Message", registerFile.get( "Value", {} ).get( "Failed", {} ).get( lfn, 'Unknown' ) ) errorStr = "failed to register LFN %s: %s" % ( lfn, reason ) opFile.Error = errorStr self.log.warn( errorStr ) failedFiles += 1 else: gMonitor.addMark( "RegisterOK", 1 ) # self.dataLoggingClient().addFileRecord( lfn, "Register", catalog, "", "RegisterFile" ) self.log.info( "file %s has been registered at %s" % ( lfn, catalog ) ) opFile.Status = "Done" # # final check if failedFiles: self.log.info( "all files processed, %s files failed to register" % failedFiles ) self.operation.Error = "some files failed to register" return S_ERROR( self.operation.Error ) return S_OK()
gpl-3.0
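RegisterFile builds a fileTuple of (LFN, PFN, size, target SE, GUID, checksum) and then inspects DIRAC's standard return dict. A self-contained illustration of that return-value convention — all names and the stubbed reply are invented; a real call goes through DataManager.registerFile:

def fake_register_file(file_tuple):
    # Stub that mimics the shape of DataManager.registerFile()'s reply:
    # an "OK" flag plus per-LFN "Successful"/"Failed" maps under "Value".
    lfn = file_tuple[0]
    return {"OK": True, "Value": {"Successful": {lfn: True}, "Failed": {}}}

file_tuple = ("/vo/user/s/someone/data.txt",      # LFN (hypothetical)
              "srm://se.example.org/data.txt",    # PFN (hypothetical)
              1024,                               # size in bytes
              "EXAMPLE-DISK-SE",                  # target storage element
              "0000-GUID-0000",                   # GUID
              "ad:01020304")                      # checksum

result = fake_register_file(file_tuple)
if not result["OK"] or file_tuple[0] in result["Value"]["Failed"]:
    print("registration failed")
else:
    print("registered %s" % file_tuple[0])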
google/google-ctf
third_party/edk2/BaseTools/Source/Python/GenFds/Ffs.py
2
2328
## @file # process FFS generation # # Copyright (c) 2007-2018, Intel Corporation. All rights reserved.<BR> # # This program and the accompanying materials # are licensed and made available under the terms and conditions of the BSD License # which accompanies this distribution. The full text of the license may be found at # http://opensource.org/licenses/bsd-license.php # # THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, # WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED. # ## # Import Modules # from Common.DataType import * # mapping between FILE type in FDF and file type for GenFfs FdfFvFileTypeToFileType = { SUP_MODULE_SEC : 'EFI_FV_FILETYPE_SECURITY_CORE', SUP_MODULE_PEI_CORE : 'EFI_FV_FILETYPE_PEI_CORE', SUP_MODULE_PEIM : 'EFI_FV_FILETYPE_PEIM', SUP_MODULE_DXE_CORE : 'EFI_FV_FILETYPE_DXE_CORE', 'FREEFORM' : 'EFI_FV_FILETYPE_FREEFORM', 'DRIVER' : 'EFI_FV_FILETYPE_DRIVER', 'APPLICATION' : 'EFI_FV_FILETYPE_APPLICATION', 'FV_IMAGE' : 'EFI_FV_FILETYPE_FIRMWARE_VOLUME_IMAGE', 'RAW' : 'EFI_FV_FILETYPE_RAW', 'PEI_DXE_COMBO' : 'EFI_FV_FILETYPE_COMBINED_PEIM_DRIVER', 'SMM' : 'EFI_FV_FILETYPE_SMM', SUP_MODULE_SMM_CORE : 'EFI_FV_FILETYPE_SMM_CORE', SUP_MODULE_MM_STANDALONE : 'EFI_FV_FILETYPE_MM_STANDALONE', SUP_MODULE_MM_CORE_STANDALONE : 'EFI_FV_FILETYPE_MM_CORE_STANDALONE' } # mapping between section type in FDF and file suffix SectionSuffix = { BINARY_FILE_TYPE_PE32 : '.pe32', BINARY_FILE_TYPE_PIC : '.pic', BINARY_FILE_TYPE_TE : '.te', BINARY_FILE_TYPE_DXE_DEPEX : '.dpx', 'VERSION' : '.ver', BINARY_FILE_TYPE_UI : '.ui', 'COMPAT16' : '.com16', 'RAW' : '.raw', 'FREEFORM_SUBTYPE_GUID': '.guid', 'SUBTYPE_GUID' : '.guid', 'FV_IMAGE' : 'fv.sec', 'COMPRESS' : '.com', 'GUIDED' : '.guided', BINARY_FILE_TYPE_PEI_DEPEX : '.dpx', BINARY_FILE_TYPE_SMM_DEPEX : '.dpx' }
apache-2.0
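The two module-level dicts in Ffs.py above are plain lookup tables that translate an FDF FILE type into a GenFfs file type and a section type into an output suffix. A trivial illustration; the two entries are copied from the dicts above, while real code imports the full tables from GenFds.Ffs:

FdfFvFileTypeToFileType = {'DRIVER': 'EFI_FV_FILETYPE_DRIVER'}
SectionSuffix = {'RAW': '.raw'}

print(FdfFvFileTypeToFileType['DRIVER'])  # file type passed to GenFfs
print(SectionSuffix['RAW'])               # suffix for the generated section file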
zero-rp/miniblink49
third_party/jinja2/sandbox.py
637
13445
# -*- coding: utf-8 -*- """ jinja2.sandbox ~~~~~~~~~~~~~~ Adds a sandbox layer to Jinja as it was the default behavior in the old Jinja 1 releases. This sandbox is slightly different from Jinja 1 as the default behavior is easier to use. The behavior can be changed by subclassing the environment. :copyright: (c) 2010 by the Jinja Team. :license: BSD. """ import operator from jinja2.environment import Environment from jinja2.exceptions import SecurityError from jinja2._compat import string_types, function_type, method_type, \ traceback_type, code_type, frame_type, generator_type, PY2 #: maximum number of items a range may produce MAX_RANGE = 100000 #: attributes of function objects that are considered unsafe. UNSAFE_FUNCTION_ATTRIBUTES = set(['func_closure', 'func_code', 'func_dict', 'func_defaults', 'func_globals']) #: unsafe method attributes. function attributes are unsafe for methods too UNSAFE_METHOD_ATTRIBUTES = set(['im_class', 'im_func', 'im_self']) #: unsafe generator attirbutes. UNSAFE_GENERATOR_ATTRIBUTES = set(['gi_frame', 'gi_code']) # On versions > python 2 the special attributes on functions are gone, # but they remain on methods and generators for whatever reason. if not PY2: UNSAFE_FUNCTION_ATTRIBUTES = set() import warnings # make sure we don't warn in python 2.6 about stuff we don't care about warnings.filterwarnings('ignore', 'the sets module', DeprecationWarning, module='jinja2.sandbox') from collections import deque _mutable_set_types = (set,) _mutable_mapping_types = (dict,) _mutable_sequence_types = (list,) # on python 2.x we can register the user collection types try: from UserDict import UserDict, DictMixin from UserList import UserList _mutable_mapping_types += (UserDict, DictMixin) _mutable_set_types += (UserList,) except ImportError: pass # if sets is still available, register the mutable set from there as well try: from sets import Set _mutable_set_types += (Set,) except ImportError: pass #: register Python 2.6 abstract base classes try: from collections import MutableSet, MutableMapping, MutableSequence _mutable_set_types += (MutableSet,) _mutable_mapping_types += (MutableMapping,) _mutable_sequence_types += (MutableSequence,) except ImportError: pass _mutable_spec = ( (_mutable_set_types, frozenset([ 'add', 'clear', 'difference_update', 'discard', 'pop', 'remove', 'symmetric_difference_update', 'update' ])), (_mutable_mapping_types, frozenset([ 'clear', 'pop', 'popitem', 'setdefault', 'update' ])), (_mutable_sequence_types, frozenset([ 'append', 'reverse', 'insert', 'sort', 'extend', 'remove' ])), (deque, frozenset([ 'append', 'appendleft', 'clear', 'extend', 'extendleft', 'pop', 'popleft', 'remove', 'rotate' ])) ) def safe_range(*args): """A range that can't generate ranges with a length of more than MAX_RANGE items. """ rng = range(*args) if len(rng) > MAX_RANGE: raise OverflowError('range too big, maximum size for range is %d' % MAX_RANGE) return rng def unsafe(f): """Marks a function or method as unsafe. :: @unsafe def delete(self): pass """ f.unsafe_callable = True return f def is_internal_attribute(obj, attr): """Test if the attribute given is an internal python attribute. For example this function returns `True` for the `func_code` attribute of python objects. This is useful if the environment method :meth:`~SandboxedEnvironment.is_safe_attribute` is overridden. 
>>> from jinja2.sandbox import is_internal_attribute >>> is_internal_attribute(lambda: None, "func_code") True >>> is_internal_attribute((lambda x:x).func_code, 'co_code') True >>> is_internal_attribute(str, "upper") False """ if isinstance(obj, function_type): if attr in UNSAFE_FUNCTION_ATTRIBUTES: return True elif isinstance(obj, method_type): if attr in UNSAFE_FUNCTION_ATTRIBUTES or \ attr in UNSAFE_METHOD_ATTRIBUTES: return True elif isinstance(obj, type): if attr == 'mro': return True elif isinstance(obj, (code_type, traceback_type, frame_type)): return True elif isinstance(obj, generator_type): if attr in UNSAFE_GENERATOR_ATTRIBUTES: return True return attr.startswith('__') def modifies_known_mutable(obj, attr): """This function checks if an attribute on a builtin mutable object (list, dict, set or deque) would modify it if called. It also supports the "user"-versions of the objects (`sets.Set`, `UserDict.*` etc.) and with Python 2.6 onwards the abstract base classes `MutableSet`, `MutableMapping`, and `MutableSequence`. >>> modifies_known_mutable({}, "clear") True >>> modifies_known_mutable({}, "keys") False >>> modifies_known_mutable([], "append") True >>> modifies_known_mutable([], "index") False If called with an unsupported object (such as unicode) `False` is returned. >>> modifies_known_mutable("foo", "upper") False """ for typespec, unsafe in _mutable_spec: if isinstance(obj, typespec): return attr in unsafe return False class SandboxedEnvironment(Environment): """The sandboxed environment. It works like the regular environment but tells the compiler to generate sandboxed code. Additionally subclasses of this environment may override the methods that tell the runtime what attributes or functions are safe to access. If the template tries to access insecure code a :exc:`SecurityError` is raised. However also other exceptions may occour during the rendering so the caller has to ensure that all exceptions are catched. """ sandboxed = True #: default callback table for the binary operators. A copy of this is #: available on each instance of a sandboxed environment as #: :attr:`binop_table` default_binop_table = { '+': operator.add, '-': operator.sub, '*': operator.mul, '/': operator.truediv, '//': operator.floordiv, '**': operator.pow, '%': operator.mod } #: default callback table for the unary operators. A copy of this is #: available on each instance of a sandboxed environment as #: :attr:`unop_table` default_unop_table = { '+': operator.pos, '-': operator.neg } #: a set of binary operators that should be intercepted. Each operator #: that is added to this set (empty by default) is delegated to the #: :meth:`call_binop` method that will perform the operator. The default #: operator callback is specified by :attr:`binop_table`. #: #: The following binary operators are interceptable: #: ``//``, ``%``, ``+``, ``*``, ``-``, ``/``, and ``**`` #: #: The default operation form the operator table corresponds to the #: builtin function. Intercepted calls are always slower than the native #: operator call, so make sure only to intercept the ones you are #: interested in. #: #: .. versionadded:: 2.6 intercepted_binops = frozenset() #: a set of unary operators that should be intercepted. Each operator #: that is added to this set (empty by default) is delegated to the #: :meth:`call_unop` method that will perform the operator. The default #: operator callback is specified by :attr:`unop_table`. 
#: #: The following unary operators are interceptable: ``+``, ``-`` #: #: The default operation form the operator table corresponds to the #: builtin function. Intercepted calls are always slower than the native #: operator call, so make sure only to intercept the ones you are #: interested in. #: #: .. versionadded:: 2.6 intercepted_unops = frozenset() def intercept_unop(self, operator): """Called during template compilation with the name of a unary operator to check if it should be intercepted at runtime. If this method returns `True`, :meth:`call_unop` is excuted for this unary operator. The default implementation of :meth:`call_unop` will use the :attr:`unop_table` dictionary to perform the operator with the same logic as the builtin one. The following unary operators are interceptable: ``+`` and ``-`` Intercepted calls are always slower than the native operator call, so make sure only to intercept the ones you are interested in. .. versionadded:: 2.6 """ return False def __init__(self, *args, **kwargs): Environment.__init__(self, *args, **kwargs) self.globals['range'] = safe_range self.binop_table = self.default_binop_table.copy() self.unop_table = self.default_unop_table.copy() def is_safe_attribute(self, obj, attr, value): """The sandboxed environment will call this method to check if the attribute of an object is safe to access. Per default all attributes starting with an underscore are considered private as well as the special attributes of internal python objects as returned by the :func:`is_internal_attribute` function. """ return not (attr.startswith('_') or is_internal_attribute(obj, attr)) def is_safe_callable(self, obj): """Check if an object is safely callable. Per default a function is considered safe unless the `unsafe_callable` attribute exists and is True. Override this method to alter the behavior, but this won't affect the `unsafe` decorator from this module. """ return not (getattr(obj, 'unsafe_callable', False) or getattr(obj, 'alters_data', False)) def call_binop(self, context, operator, left, right): """For intercepted binary operator calls (:meth:`intercepted_binops`) this function is executed instead of the builtin operator. This can be used to fine tune the behavior of certain operators. .. versionadded:: 2.6 """ return self.binop_table[operator](left, right) def call_unop(self, context, operator, arg): """For intercepted unary operator calls (:meth:`intercepted_unops`) this function is executed instead of the builtin operator. This can be used to fine tune the behavior of certain operators. .. versionadded:: 2.6 """ return self.unop_table[operator](arg) def getitem(self, obj, argument): """Subscribe an object from sandboxed code.""" try: return obj[argument] except (TypeError, LookupError): if isinstance(argument, string_types): try: attr = str(argument) except Exception: pass else: try: value = getattr(obj, attr) except AttributeError: pass else: if self.is_safe_attribute(obj, argument, value): return value return self.unsafe_undefined(obj, argument) return self.undefined(obj=obj, name=argument) def getattr(self, obj, attribute): """Subscribe an object from sandboxed code and prefer the attribute. The attribute passed *must* be a bytestring. 
""" try: value = getattr(obj, attribute) except AttributeError: try: return obj[attribute] except (TypeError, LookupError): pass else: if self.is_safe_attribute(obj, attribute, value): return value return self.unsafe_undefined(obj, attribute) return self.undefined(obj=obj, name=attribute) def unsafe_undefined(self, obj, attribute): """Return an undefined object for unsafe attributes.""" return self.undefined('access to attribute %r of %r ' 'object is unsafe.' % ( attribute, obj.__class__.__name__ ), name=attribute, obj=obj, exc=SecurityError) def call(__self, __context, __obj, *args, **kwargs): """Call an object from sandboxed code.""" # the double prefixes are to avoid double keyword argument # errors when proxying the call. if not __self.is_safe_callable(__obj): raise SecurityError('%r is not safely callable' % (__obj,)) return __context.call(__obj, *args, **kwargs) class ImmutableSandboxedEnvironment(SandboxedEnvironment): """Works exactly like the regular `SandboxedEnvironment` but does not permit modifications on the builtin mutable objects `list`, `set`, and `dict` by using the :func:`modifies_known_mutable` function. """ def is_safe_attribute(self, obj, attr, value): if not SandboxedEnvironment.is_safe_attribute(self, obj, attr, value): return False return not modifies_known_mutable(obj, attr)
apache-2.0
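A usage sketch for the sandboxed environment defined above: ordinary rendering behaves like the regular Environment, while reaching into unsafe attributes is cut off. The func_code example assumes Python 2, where that attribute appears in UNSAFE_FUNCTION_ATTRIBUTES:

from jinja2.sandbox import SandboxedEnvironment
from jinja2.exceptions import SecurityError

env = SandboxedEnvironment()
print(env.from_string("Hello {{ name }}!").render(name="world"))

try:
    # On Python 2, func_code is unsafe; poking further into it raises.
    env.from_string("{{ f.func_code.co_code }}").render(f=lambda: None)
except SecurityError as exc:
    print("blocked: %s" % exc)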
mgraupe/acq4
acq4/pyqtgraph/graphicsItems/ScatterPlotItem.py
12
37649
from itertools import starmap, repeat try: from itertools import imap except ImportError: imap = map import numpy as np import weakref from ..Qt import QtGui, QtCore, USE_PYSIDE, USE_PYQT5 from ..Point import Point from .. import functions as fn from .GraphicsItem import GraphicsItem from .GraphicsObject import GraphicsObject from .. import getConfigOption from ..pgcollections import OrderedDict from .. import debug from ..python2_3 import basestring __all__ = ['ScatterPlotItem', 'SpotItem'] ## Build all symbol paths Symbols = OrderedDict([(name, QtGui.QPainterPath()) for name in ['o', 's', 't', 't1', 't2', 't3','d', '+', 'x', 'p', 'h', 'star']]) Symbols['o'].addEllipse(QtCore.QRectF(-0.5, -0.5, 1, 1)) Symbols['s'].addRect(QtCore.QRectF(-0.5, -0.5, 1, 1)) coords = { 't': [(-0.5, -0.5), (0, 0.5), (0.5, -0.5)], 't1': [(-0.5, 0.5), (0, -0.5), (0.5, 0.5)], 't2': [(-0.5, -0.5), (-0.5, 0.5), (0.5, 0)], 't3': [(0.5, 0.5), (0.5, -0.5), (-0.5, 0)], 'd': [(0., -0.5), (-0.4, 0.), (0, 0.5), (0.4, 0)], '+': [ (-0.5, -0.05), (-0.5, 0.05), (-0.05, 0.05), (-0.05, 0.5), (0.05, 0.5), (0.05, 0.05), (0.5, 0.05), (0.5, -0.05), (0.05, -0.05), (0.05, -0.5), (-0.05, -0.5), (-0.05, -0.05) ], 'p': [(0, -0.5), (-0.4755, -0.1545), (-0.2939, 0.4045), (0.2939, 0.4045), (0.4755, -0.1545)], 'h': [(0.433, 0.25), (0., 0.5), (-0.433, 0.25), (-0.433, -0.25), (0, -0.5), (0.433, -0.25)], 'star': [(0, -0.5), (-0.1123, -0.1545), (-0.4755, -0.1545), (-0.1816, 0.059), (-0.2939, 0.4045), (0, 0.1910), (0.2939, 0.4045), (0.1816, 0.059), (0.4755, -0.1545), (0.1123, -0.1545)] } for k, c in coords.items(): Symbols[k].moveTo(*c[0]) for x,y in c[1:]: Symbols[k].lineTo(x, y) Symbols[k].closeSubpath() tr = QtGui.QTransform() tr.rotate(45) Symbols['x'] = tr.map(Symbols['+']) def drawSymbol(painter, symbol, size, pen, brush): if symbol is None: return painter.scale(size, size) painter.setPen(pen) painter.setBrush(brush) if isinstance(symbol, basestring): symbol = Symbols[symbol] if np.isscalar(symbol): symbol = list(Symbols.values())[symbol % len(Symbols)] painter.drawPath(symbol) def renderSymbol(symbol, size, pen, brush, device=None): """ Render a symbol specification to QImage. Symbol may be either a QPainterPath or one of the keys in the Symbols dict. If *device* is None, a new QPixmap will be returned. Otherwise, the symbol will be rendered into the device specified (See QPainter documentation for more information). """ ## Render a spot with the given parameters to a pixmap penPxWidth = max(np.ceil(pen.widthF()), 1) if device is None: device = QtGui.QImage(int(size+penPxWidth), int(size+penPxWidth), QtGui.QImage.Format_ARGB32) device.fill(0) p = QtGui.QPainter(device) try: p.setRenderHint(p.Antialiasing) p.translate(device.width()*0.5, device.height()*0.5) drawSymbol(p, symbol, size, pen, brush) finally: p.end() return device def makeSymbolPixmap(size, pen, brush, symbol): ## deprecated img = renderSymbol(symbol, size, pen, brush) return QtGui.QPixmap(img) class SymbolAtlas(object): """ Used to efficiently construct a single QPixmap containing all rendered symbols for a ScatterPlotItem. This is required for fragment rendering. Use example: atlas = SymbolAtlas() sc1 = atlas.getSymbolCoords('o', 5, QPen(..), QBrush(..)) sc2 = atlas.getSymbolCoords('t', 10, QPen(..), QBrush(..)) pm = atlas.getAtlas() """ def __init__(self): # symbol key : QRect(...) coordinates where symbol can be found in atlas. 
# note that the coordinate list will always be the same list object as # long as the symbol is in the atlas, but the coordinates may # change if the atlas is rebuilt. # weak value; if all external refs to this list disappear, # the symbol will be forgotten. self.symbolMap = weakref.WeakValueDictionary() self.atlasData = None # numpy array of atlas image self.atlas = None # atlas as QPixmap self.atlasValid = False self.max_width=0 def getSymbolCoords(self, opts): """ Given a list of spot records, return an object representing the coordinates of that symbol within the atlas """ sourceRect = np.empty(len(opts), dtype=object) keyi = None sourceRecti = None for i, rec in enumerate(opts): key = (rec[3], rec[2], id(rec[4]), id(rec[5])) # TODO: use string indexes? if key == keyi: sourceRect[i] = sourceRecti else: try: sourceRect[i] = self.symbolMap[key] except KeyError: newRectSrc = QtCore.QRectF() newRectSrc.pen = rec['pen'] newRectSrc.brush = rec['brush'] self.symbolMap[key] = newRectSrc self.atlasValid = False sourceRect[i] = newRectSrc keyi = key sourceRecti = newRectSrc return sourceRect def buildAtlas(self): # get rendered array for all symbols, keep track of avg/max width rendered = {} avgWidth = 0.0 maxWidth = 0 images = [] for key, sourceRect in self.symbolMap.items(): if sourceRect.width() == 0: img = renderSymbol(key[0], key[1], sourceRect.pen, sourceRect.brush) images.append(img) ## we only need this to prevent the images being garbage collected immediately arr = fn.imageToArray(img, copy=False, transpose=False) else: (y,x,h,w) = sourceRect.getRect() arr = self.atlasData[int(x):int(x+w), int(y):int(y+w)] rendered[key] = arr w = arr.shape[0] avgWidth += w maxWidth = max(maxWidth, w) nSymbols = len(rendered) if nSymbols > 0: avgWidth /= nSymbols width = max(maxWidth, avgWidth * (nSymbols**0.5)) else: avgWidth = 0 width = 0 # sort symbols by height symbols = sorted(rendered.keys(), key=lambda x: rendered[x].shape[1], reverse=True) self.atlasRows = [] x = width y = 0 rowheight = 0 for key in symbols: arr = rendered[key] w,h = arr.shape[:2] if x+w > width: y += rowheight x = 0 rowheight = h self.atlasRows.append([y, rowheight, 0]) self.symbolMap[key].setRect(y, x, h, w) x += w self.atlasRows[-1][2] = x height = y + rowheight self.atlasData = np.zeros((int(width), int(height), 4), dtype=np.ubyte) for key in symbols: y, x, h, w = self.symbolMap[key].getRect() self.atlasData[int(x):int(x+w), int(y):int(y+h)] = rendered[key] self.atlas = None self.atlasValid = True self.max_width = maxWidth def getAtlas(self): if not self.atlasValid: self.buildAtlas() if self.atlas is None: if len(self.atlasData) == 0: return QtGui.QPixmap(0,0) img = fn.makeQImage(self.atlasData, copy=False, transpose=False) self.atlas = QtGui.QPixmap(img) return self.atlas class ScatterPlotItem(GraphicsObject): """ Displays a set of x/y points. Instances of this class are created automatically as part of PlotDataItem; these rarely need to be instantiated directly. The size, shape, pen, and fill brush may be set for each point individually or for all points. ======================== =============================================== **Signals:** sigPlotChanged(self) Emitted when the data being plotted has changed sigClicked(self, points) Emitted when the curve is clicked. Sends a list of all the points under the mouse pointer. 
======================== =============================================== """ #sigPointClicked = QtCore.Signal(object, object) sigClicked = QtCore.Signal(object, object) ## self, points sigPlotChanged = QtCore.Signal(object) def __init__(self, *args, **kargs): """ Accepts the same arguments as setData() """ profiler = debug.Profiler() GraphicsObject.__init__(self) self.picture = None # QPicture used for rendering when pxmode==False self.fragmentAtlas = SymbolAtlas() self.data = np.empty(0, dtype=[('x', float), ('y', float), ('size', float), ('symbol', object), ('pen', object), ('brush', object), ('data', object), ('item', object), ('sourceRect', object), ('targetRect', object), ('width', float)]) self.bounds = [None, None] ## caches data bounds self._maxSpotWidth = 0 ## maximum size of the scale-variant portion of all spots self._maxSpotPxWidth = 0 ## maximum size of the scale-invariant portion of all spots self.opts = { 'pxMode': True, 'useCache': True, ## If useCache is False, symbols are re-drawn on every paint. 'antialias': getConfigOption('antialias'), 'name': None, } self.setPen(fn.mkPen(getConfigOption('foreground')), update=False) self.setBrush(fn.mkBrush(100,100,150), update=False) self.setSymbol('o', update=False) self.setSize(7, update=False) profiler() self.setData(*args, **kargs) profiler('setData') #self.setCacheMode(self.DeviceCoordinateCache) def setData(self, *args, **kargs): """ **Ordered Arguments:** * If there is only one unnamed argument, it will be interpreted like the 'spots' argument. * If there are two unnamed arguments, they will be interpreted as sequences of x and y values. ====================== =============================================================================================== **Keyword Arguments:** *spots* Optional list of dicts. Each dict specifies parameters for a single spot: {'pos': (x,y), 'size', 'pen', 'brush', 'symbol'}. This is just an alternate method of passing in data for the corresponding arguments. *x*,*y* 1D arrays of x,y values. *pos* 2D structure of x,y pairs (such as Nx2 array or list of tuples) *pxMode* If True, spots are always the same size regardless of scaling, and size is given in px. Otherwise, size is in scene coordinates and the spots scale with the view. Default is True *symbol* can be one (or a list) of: * 'o' circle (default) * 's' square * 't' triangle * 'd' diamond * '+' plus * any QPainterPath to specify custom symbol shapes. To properly obey the position and size, custom symbols should be centered at (0,0) and width and height of 1.0. Note that it is also possible to 'install' custom shapes by setting ScatterPlotItem.Symbols[key] = shape. *pen* The pen (or list of pens) to use for drawing spot outlines. *brush* The brush (or list of brushes) to use for filling spots. *size* The size (or list of sizes) of spots. If *pxMode* is True, this value is in pixels. Otherwise, it is in the item's local coordinate system. *data* a list of python objects used to uniquely identify each spot. *identical* *Deprecated*. This functionality is handled automatically now. *antialias* Whether to draw symbols with antialiasing. Note that if pxMode is True, symbols are always rendered with antialiasing (since the rendered symbols can be cached, this incurs very little performance cost) *name* The name of this item. Names are used for automatically generating LegendItem entries and by some exporters. 
====================== =============================================================================================== """ oldData = self.data ## this causes cached pixmaps to be preserved while new data is registered. self.clear() ## clear out all old data self.addPoints(*args, **kargs) def addPoints(self, *args, **kargs): """ Add new points to the scatter plot. Arguments are the same as setData() """ ## deal with non-keyword arguments if len(args) == 1: kargs['spots'] = args[0] elif len(args) == 2: kargs['x'] = args[0] kargs['y'] = args[1] elif len(args) > 2: raise Exception('Only accepts up to two non-keyword arguments.') ## convert 'pos' argument to 'x' and 'y' if 'pos' in kargs: pos = kargs['pos'] if isinstance(pos, np.ndarray): kargs['x'] = pos[:,0] kargs['y'] = pos[:,1] else: x = [] y = [] for p in pos: if isinstance(p, QtCore.QPointF): x.append(p.x()) y.append(p.y()) else: x.append(p[0]) y.append(p[1]) kargs['x'] = x kargs['y'] = y ## determine how many spots we have if 'spots' in kargs: numPts = len(kargs['spots']) elif 'y' in kargs and kargs['y'] is not None: numPts = len(kargs['y']) else: kargs['x'] = [] kargs['y'] = [] numPts = 0 ## Extend record array oldData = self.data self.data = np.empty(len(oldData)+numPts, dtype=self.data.dtype) ## note that np.empty initializes object fields to None and string fields to '' self.data[:len(oldData)] = oldData #for i in range(len(oldData)): #oldData[i]['item']._data = self.data[i] ## Make sure items have proper reference to new array newData = self.data[len(oldData):] newData['size'] = -1 ## indicates to use default size if 'spots' in kargs: spots = kargs['spots'] for i in range(len(spots)): spot = spots[i] for k in spot: if k == 'pos': pos = spot[k] if isinstance(pos, QtCore.QPointF): x,y = pos.x(), pos.y() else: x,y = pos[0], pos[1] newData[i]['x'] = x newData[i]['y'] = y elif k == 'pen': newData[i][k] = fn.mkPen(spot[k]) elif k == 'brush': newData[i][k] = fn.mkBrush(spot[k]) elif k in ['x', 'y', 'size', 'symbol', 'brush', 'data']: newData[i][k] = spot[k] else: raise Exception("Unknown spot parameter: %s" % k) elif 'y' in kargs: newData['x'] = kargs['x'] newData['y'] = kargs['y'] if 'pxMode' in kargs: self.setPxMode(kargs['pxMode']) if 'antialias' in kargs: self.opts['antialias'] = kargs['antialias'] ## Set any extra parameters provided in keyword arguments for k in ['pen', 'brush', 'symbol', 'size']: if k in kargs: setMethod = getattr(self, 'set' + k[0].upper() + k[1:]) setMethod(kargs[k], update=False, dataSet=newData, mask=kargs.get('mask', None)) if 'data' in kargs: self.setPointData(kargs['data'], dataSet=newData) self.prepareGeometryChange() self.informViewBoundsChanged() self.bounds = [None, None] self.invalidate() self.updateSpots(newData) self.sigPlotChanged.emit(self) def invalidate(self): ## clear any cached drawing state self.picture = None self.update() def getData(self): return self.data['x'], self.data['y'] def setPoints(self, *args, **kargs): ##Deprecated; use setData return self.setData(*args, **kargs) def implements(self, interface=None): ints = ['plotData'] if interface is None: return ints return interface in ints def name(self): return self.opts.get('name', None) def setPen(self, *args, **kargs): """Set the pen(s) used to draw the outline around each spot. If a list or array is provided, then the pen for each spot will be set separately. 
Otherwise, the arguments are passed to pg.mkPen and used as the default pen for all spots which do not have a pen explicitly set.""" update = kargs.pop('update', True) dataSet = kargs.pop('dataSet', self.data) if len(args) == 1 and (isinstance(args[0], np.ndarray) or isinstance(args[0], list)): pens = args[0] if 'mask' in kargs and kargs['mask'] is not None: pens = pens[kargs['mask']] if len(pens) != len(dataSet): raise Exception("Number of pens does not match number of points (%d != %d)" % (len(pens), len(dataSet))) dataSet['pen'] = pens else: self.opts['pen'] = fn.mkPen(*args, **kargs) dataSet['sourceRect'] = None if update: self.updateSpots(dataSet) def setBrush(self, *args, **kargs): """Set the brush(es) used to fill the interior of each spot. If a list or array is provided, then the brush for each spot will be set separately. Otherwise, the arguments are passed to pg.mkBrush and used as the default brush for all spots which do not have a brush explicitly set.""" update = kargs.pop('update', True) dataSet = kargs.pop('dataSet', self.data) if len(args) == 1 and (isinstance(args[0], np.ndarray) or isinstance(args[0], list)): brushes = args[0] if 'mask' in kargs and kargs['mask'] is not None: brushes = brushes[kargs['mask']] if len(brushes) != len(dataSet): raise Exception("Number of brushes does not match number of points (%d != %d)" % (len(brushes), len(dataSet))) dataSet['brush'] = brushes else: self.opts['brush'] = fn.mkBrush(*args, **kargs) #self._spotPixmap = None dataSet['sourceRect'] = None if update: self.updateSpots(dataSet) def setSymbol(self, symbol, update=True, dataSet=None, mask=None): """Set the symbol(s) used to draw each spot. If a list or array is provided, then the symbol for each spot will be set separately. Otherwise, the argument will be used as the default symbol for all spots which do not have a symbol explicitly set.""" if dataSet is None: dataSet = self.data if isinstance(symbol, np.ndarray) or isinstance(symbol, list): symbols = symbol if mask is not None: symbols = symbols[mask] if len(symbols) != len(dataSet): raise Exception("Number of symbols does not match number of points (%d != %d)" % (len(symbols), len(dataSet))) dataSet['symbol'] = symbols else: self.opts['symbol'] = symbol self._spotPixmap = None dataSet['sourceRect'] = None if update: self.updateSpots(dataSet) def setSize(self, size, update=True, dataSet=None, mask=None): """Set the size(s) used to draw each spot. If a list or array is provided, then the size for each spot will be set separately. Otherwise, the argument will be used as the default size for all spots which do not have a size explicitly set.""" if dataSet is None: dataSet = self.data if isinstance(size, np.ndarray) or isinstance(size, list): sizes = size if mask is not None: sizes = sizes[mask] if len(sizes) != len(dataSet): raise Exception("Number of sizes does not match number of points (%d != %d)" % (len(sizes), len(dataSet))) dataSet['size'] = sizes else: self.opts['size'] = size self._spotPixmap = None dataSet['sourceRect'] = None if update: self.updateSpots(dataSet) def setPointData(self, data, dataSet=None, mask=None): if dataSet is None: dataSet = self.data if isinstance(data, np.ndarray) or isinstance(data, list): if mask is not None: data = data[mask] if len(data) != len(dataSet): raise Exception("Length of meta data does not match number of points (%d != %d)" % (len(data), len(dataSet))) ## Bug: If data is a numpy record array, then items from that array must be copied to dataSet one at a time. 
## (otherwise they are converted to tuples and thus lose their field names. if isinstance(data, np.ndarray) and (data.dtype.fields is not None)and len(data.dtype.fields) > 1: for i, rec in enumerate(data): dataSet['data'][i] = rec else: dataSet['data'] = data def setPxMode(self, mode): if self.opts['pxMode'] == mode: return self.opts['pxMode'] = mode self.invalidate() def updateSpots(self, dataSet=None): if dataSet is None: dataSet = self.data invalidate = False if self.opts['pxMode']: mask = np.equal(dataSet['sourceRect'], None) if np.any(mask): invalidate = True opts = self.getSpotOpts(dataSet[mask]) sourceRect = self.fragmentAtlas.getSymbolCoords(opts) dataSet['sourceRect'][mask] = sourceRect self.fragmentAtlas.getAtlas() # generate atlas so source widths are available. dataSet['width'] = np.array(list(imap(QtCore.QRectF.width, dataSet['sourceRect'])))/2 dataSet['targetRect'] = None self._maxSpotPxWidth = self.fragmentAtlas.max_width else: self._maxSpotWidth = 0 self._maxSpotPxWidth = 0 self.measureSpotSizes(dataSet) if invalidate: self.invalidate() def getSpotOpts(self, recs, scale=1.0): if recs.ndim == 0: rec = recs symbol = rec['symbol'] if symbol is None: symbol = self.opts['symbol'] size = rec['size'] if size < 0: size = self.opts['size'] pen = rec['pen'] if pen is None: pen = self.opts['pen'] brush = rec['brush'] if brush is None: brush = self.opts['brush'] return (symbol, size*scale, fn.mkPen(pen), fn.mkBrush(brush)) else: recs = recs.copy() recs['symbol'][np.equal(recs['symbol'], None)] = self.opts['symbol'] recs['size'][np.equal(recs['size'], -1)] = self.opts['size'] recs['size'] *= scale recs['pen'][np.equal(recs['pen'], None)] = fn.mkPen(self.opts['pen']) recs['brush'][np.equal(recs['brush'], None)] = fn.mkBrush(self.opts['brush']) return recs def measureSpotSizes(self, dataSet): for rec in dataSet: ## keep track of the maximum spot size and pixel size symbol, size, pen, brush = self.getSpotOpts(rec) width = 0 pxWidth = 0 if self.opts['pxMode']: pxWidth = size + pen.widthF() else: width = size if pen.isCosmetic(): pxWidth += pen.widthF() else: width += pen.widthF() self._maxSpotWidth = max(self._maxSpotWidth, width) self._maxSpotPxWidth = max(self._maxSpotPxWidth, pxWidth) self.bounds = [None, None] def clear(self): """Remove all spots from the scatter plot""" #self.clearItems() self.data = np.empty(0, dtype=self.data.dtype) self.bounds = [None, None] self.invalidate() def dataBounds(self, ax, frac=1.0, orthoRange=None): if frac >= 1.0 and orthoRange is None and self.bounds[ax] is not None: return self.bounds[ax] #self.prepareGeometryChange() if self.data is None or len(self.data) == 0: return (None, None) if ax == 0: d = self.data['x'] d2 = self.data['y'] elif ax == 1: d = self.data['y'] d2 = self.data['x'] if orthoRange is not None: mask = (d2 >= orthoRange[0]) * (d2 <= orthoRange[1]) d = d[mask] d2 = d2[mask] if frac >= 1.0: self.bounds[ax] = (np.nanmin(d) - self._maxSpotWidth*0.7072, np.nanmax(d) + self._maxSpotWidth*0.7072) return self.bounds[ax] elif frac <= 0.0: raise Exception("Value for parameter 'frac' must be > 0. 
(got %s)" % str(frac)) else: mask = np.isfinite(d) d = d[mask] return np.percentile(d, [50 * (1 - frac), 50 * (1 + frac)]) def pixelPadding(self): return self._maxSpotPxWidth*0.7072 def boundingRect(self): (xmn, xmx) = self.dataBounds(ax=0) (ymn, ymx) = self.dataBounds(ax=1) if xmn is None or xmx is None: xmn = 0 xmx = 0 if ymn is None or ymx is None: ymn = 0 ymx = 0 px = py = 0.0 pxPad = self.pixelPadding() if pxPad > 0: # determine length of pixel in local x, y directions px, py = self.pixelVectors() try: px = 0 if px is None else px.length() except OverflowError: px = 0 try: py = 0 if py is None else py.length() except OverflowError: py = 0 # return bounds expanded by pixel size px *= pxPad py *= pxPad return QtCore.QRectF(xmn-px, ymn-py, (2*px)+xmx-xmn, (2*py)+ymx-ymn) def viewTransformChanged(self): self.prepareGeometryChange() GraphicsObject.viewTransformChanged(self) self.bounds = [None, None] self.data['targetRect'] = None def setExportMode(self, *args, **kwds): GraphicsObject.setExportMode(self, *args, **kwds) self.invalidate() def mapPointsToDevice(self, pts): # Map point locations to device tr = self.deviceTransform() if tr is None: return None #pts = np.empty((2,len(self.data['x']))) #pts[0] = self.data['x'] #pts[1] = self.data['y'] pts = fn.transformCoordinates(tr, pts) pts -= self.data['width'] pts = np.clip(pts, -2**30, 2**30) ## prevent Qt segmentation fault. return pts def getViewMask(self, pts): # Return bool mask indicating all points that are within viewbox # pts is expressed in *device coordiantes* vb = self.getViewBox() if vb is None: return None viewBounds = vb.mapRectToDevice(vb.boundingRect()) w = self.data['width'] mask = ((pts[0] + w > viewBounds.left()) & (pts[0] - w < viewBounds.right()) & (pts[1] + w > viewBounds.top()) & (pts[1] - w < viewBounds.bottom())) ## remove out of view points return mask @debug.warnOnException ## raising an exception here causes crash def paint(self, p, *args): #p.setPen(fn.mkPen('r')) #p.drawRect(self.boundingRect()) if self._exportOpts is not False: aa = self._exportOpts.get('antialias', True) scale = self._exportOpts.get('resolutionScale', 1.0) ## exporting to image; pixel resolution may have changed else: aa = self.opts['antialias'] scale = 1.0 if self.opts['pxMode'] is True: p.resetTransform() # Map point coordinates to device pts = np.vstack([self.data['x'], self.data['y']]) pts = self.mapPointsToDevice(pts) if pts is None: return # Cull points that are outside view viewMask = self.getViewMask(pts) #pts = pts[:,mask] #data = self.data[mask] if self.opts['useCache'] and self._exportOpts is False: # Draw symbols from pre-rendered atlas atlas = self.fragmentAtlas.getAtlas() # Update targetRects if necessary updateMask = viewMask & np.equal(self.data['targetRect'], None) if np.any(updateMask): updatePts = pts[:,updateMask] width = self.data[updateMask]['width']*2 self.data['targetRect'][updateMask] = list(imap(QtCore.QRectF, updatePts[0,:], updatePts[1,:], width, width)) data = self.data[viewMask] if USE_PYSIDE or USE_PYQT5: list(imap(p.drawPixmap, data['targetRect'], repeat(atlas), data['sourceRect'])) else: p.drawPixmapFragments(data['targetRect'].tolist(), data['sourceRect'].tolist(), atlas) else: # render each symbol individually p.setRenderHint(p.Antialiasing, aa) data = self.data[viewMask] pts = pts[:,viewMask] for i, rec in enumerate(data): p.resetTransform() p.translate(pts[0,i] + rec['width'], pts[1,i] + rec['width']) drawSymbol(p, *self.getSpotOpts(rec, scale)) else: if self.picture is None: self.picture = 
QtGui.QPicture() p2 = QtGui.QPainter(self.picture) for rec in self.data: if scale != 1.0: rec = rec.copy() rec['size'] *= scale p2.resetTransform() p2.translate(rec['x'], rec['y']) drawSymbol(p2, *self.getSpotOpts(rec, scale)) p2.end() p.setRenderHint(p.Antialiasing, aa) self.picture.play(p) def points(self): for rec in self.data: if rec['item'] is None: rec['item'] = SpotItem(rec, self) return self.data['item'] def pointsAt(self, pos): x = pos.x() y = pos.y() pw = self.pixelWidth() ph = self.pixelHeight() pts = [] for s in self.points(): sp = s.pos() ss = s.size() sx = sp.x() sy = sp.y() s2x = s2y = ss * 0.5 if self.opts['pxMode']: s2x *= pw s2y *= ph if x > sx-s2x and x < sx+s2x and y > sy-s2y and y < sy+s2y: pts.append(s) #print "HIT:", x, y, sx, sy, s2x, s2y #else: #print "No hit:", (x, y), (sx, sy) #print " ", (sx-s2x, sy-s2y), (sx+s2x, sy+s2y) return pts[::-1] def mouseClickEvent(self, ev): if ev.button() == QtCore.Qt.LeftButton: pts = self.pointsAt(ev.pos()) if len(pts) > 0: self.ptsClicked = pts self.sigClicked.emit(self, self.ptsClicked) ev.accept() else: #print "no spots" ev.ignore() else: ev.ignore() class SpotItem(object): """ Class referring to individual spots in a scatter plot. These can be retrieved by calling ScatterPlotItem.points() or by connecting to the ScatterPlotItem's click signals. """ def __init__(self, data, plot): #GraphicsItem.__init__(self, register=False) self._data = data self._plot = plot #self.setParentItem(plot) #self.setPos(QtCore.QPointF(data['x'], data['y'])) #self.updateItem() def data(self): """Return the user data associated with this spot.""" return self._data['data'] def size(self): """Return the size of this spot. If the spot has no explicit size set, then return the ScatterPlotItem's default size instead.""" if self._data['size'] == -1: return self._plot.opts['size'] else: return self._data['size'] def pos(self): return Point(self._data['x'], self._data['y']) def viewPos(self): return self._plot.mapToView(self.pos()) def setSize(self, size): """Set the size of this spot. If the size is set to -1, then the ScatterPlotItem's default size will be used instead.""" self._data['size'] = size self.updateItem() def symbol(self): """Return the symbol of this spot. If the spot has no explicit symbol set, then return the ScatterPlotItem's default symbol instead. """ symbol = self._data['symbol'] if symbol is None: symbol = self._plot.opts['symbol'] try: n = int(symbol) symbol = list(Symbols.keys())[n % len(Symbols)] except: pass return symbol def setSymbol(self, symbol): """Set the symbol for this spot. 
If the symbol is set to '', then the ScatterPlotItem's default symbol will be used instead.""" self._data['symbol'] = symbol self.updateItem() def pen(self): pen = self._data['pen'] if pen is None: pen = self._plot.opts['pen'] return fn.mkPen(pen) def setPen(self, *args, **kargs): """Set the outline pen for this spot""" pen = fn.mkPen(*args, **kargs) self._data['pen'] = pen self.updateItem() def resetPen(self): """Remove the pen set for this spot; the scatter plot's default pen will be used instead.""" self._data['pen'] = None ## Note this is NOT the same as calling setPen(None) self.updateItem() def brush(self): brush = self._data['brush'] if brush is None: brush = self._plot.opts['brush'] return fn.mkBrush(brush) def setBrush(self, *args, **kargs): """Set the fill brush for this spot""" brush = fn.mkBrush(*args, **kargs) self._data['brush'] = brush self.updateItem() def resetBrush(self): """Remove the brush set for this spot; the scatter plot's default brush will be used instead.""" self._data['brush'] = None ## Note this is NOT the same as calling setBrush(None) self.updateItem() def setData(self, data): """Set the user-data associated with this spot""" self._data['data'] = data def updateItem(self): self._data['sourceRect'] = None self._plot.updateSpots(self._data.reshape(1)) self._plot.invalidate() #class PixmapSpotItem(SpotItem, QtGui.QGraphicsPixmapItem): #def __init__(self, data, plot): #QtGui.QGraphicsPixmapItem.__init__(self) #self.setFlags(self.flags() | self.ItemIgnoresTransformations) #SpotItem.__init__(self, data, plot) #def setPixmap(self, pixmap): #QtGui.QGraphicsPixmapItem.setPixmap(self, pixmap) #self.setOffset(-pixmap.width()/2.+0.5, -pixmap.height()/2.) #def updateItem(self): #symbolOpts = (self._data['pen'], self._data['brush'], self._data['size'], self._data['symbol']) ### If all symbol options are default, use default pixmap #if symbolOpts == (None, None, -1, ''): #pixmap = self._plot.defaultSpotPixmap() #else: #pixmap = makeSymbolPixmap(size=self.size(), pen=self.pen(), brush=self.brush(), symbol=self.symbol()) #self.setPixmap(pixmap) #class PathSpotItem(SpotItem, QtGui.QGraphicsPathItem): #def __init__(self, data, plot): #QtGui.QGraphicsPathItem.__init__(self) #SpotItem.__init__(self, data, plot) #def updateItem(self): #QtGui.QGraphicsPathItem.setPath(self, Symbols[self.symbol()]) #QtGui.QGraphicsPathItem.setPen(self, self.pen()) #QtGui.QGraphicsPathItem.setBrush(self, self.brush()) #size = self.size() #self.resetTransform() #self.scale(size, size)
mit
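A usage sketch for ScatterPlotItem using only options documented in its setData() docstring above; the data and styling values are arbitrary:

import numpy as np
import pyqtgraph as pg

app = pg.mkQApp()
win = pg.plot(title="scatter demo")            # convenience PlotWindow

scatter = pg.ScatterPlotItem(
    x=np.random.normal(size=100),
    y=np.random.normal(size=100),
    size=10, pxMode=True,                      # sizes given in screen pixels
    pen=pg.mkPen(200, 200, 255),
    brush=pg.mkBrush(100, 100, 150, 120),
    symbol='o')

def on_clicked(item, points):
    # points is the list of SpotItems under the cursor (see sigClicked above)
    print("clicked %d point(s)" % len(points))

scatter.sigClicked.connect(on_clicked)
win.addItem(scatter)
# app.exec_() would start the Qt event loop in a standalone script.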
velorientc/git_test8
git_remote_helpers/git/git.py
44
24712
#!/usr/bin/env python """Functionality for interacting with Git repositories. This module provides classes for interfacing with a Git repository. """ import os import re import time from binascii import hexlify from cStringIO import StringIO import unittest from git_remote_helpers.util import debug, error, die, start_command, run_command def get_git_dir (): """Return the path to the GIT_DIR for this repo.""" args = ("git", "rev-parse", "--git-dir") exit_code, output, errors = run_command(args) if exit_code: die("Failed to retrieve git dir") assert not errors return output.strip() def parse_git_config (): """Return a dict containing the parsed version of 'git config -l'.""" exit_code, output, errors = run_command(("git", "config", "-z", "-l")) if exit_code: die("Failed to retrieve git configuration") assert not errors return dict([e.split('\n', 1) for e in output.split("\0") if e]) def git_config_bool (value): """Convert the given git config string value to True or False. Raise ValueError if the given string was not recognized as a boolean value. """ norm_value = str(value).strip().lower() if norm_value in ("true", "1", "yes", "on", ""): return True if norm_value in ("false", "0", "no", "off", "none"): return False raise ValueError("Failed to parse '%s' into a boolean value" % (value)) def valid_git_ref (ref_name): """Return True iff the given ref name is a valid git ref name.""" # The following is a reimplementation of the git check-ref-format # command. The rules were derived from the git check-ref-format(1) # manual page. This code should be replaced by a call to # check_refname_format() in the git library, when such is available. if ref_name.endswith('/') or \ ref_name.startswith('.') or \ ref_name.count('/.') or \ ref_name.count('..') or \ ref_name.endswith('.lock'): return False for c in ref_name: if ord(c) < 0x20 or ord(c) == 0x7f or c in " ~^:?*[": return False return True class GitObjectFetcher(object): """Provide parsed access to 'git cat-file --batch'. This provides a read-only interface to the Git object database. """ def __init__ (self): """Initiate a 'git cat-file --batch' session.""" self.queue = [] # List of object names to be submitted self.in_transit = None # Object name currently in transit # 'git cat-file --batch' produces binary output which is likely # to be corrupted by the default "rU"-mode pipe opened by # start_command. (Mode == "rU" does universal new-line # conversion, which mangles carriage returns.) Therefore, we # open an explicitly binary-safe pipe for transferring the # output from 'git cat-file --batch'. pipe_r_fd, pipe_w_fd = os.pipe() pipe_r = os.fdopen(pipe_r_fd, "rb") pipe_w = os.fdopen(pipe_w_fd, "wb") self.proc = start_command(("git", "cat-file", "--batch"), stdout = pipe_w) self.f = pipe_r def __del__ (self): """Verify completed communication with 'git cat-file --batch'.""" assert not self.queue assert self.in_transit is None self.proc.stdin.close() assert self.proc.wait() == 0 # Zero exit code assert self.f.read() == "" # No remaining output def _submit_next_object (self): """Submit queue items to the 'git cat-file --batch' process. If there are items in the queue, and there is currently no item currently in 'transit', then pop the first item off the queue, and submit it. """ if self.queue and self.in_transit is None: self.in_transit = self.queue.pop(0) print >> self.proc.stdin, self.in_transit[0] def push (self, obj, callback): """Push the given object name onto the queue. 
The given callback function will at some point in the future be called exactly once with the following arguments: - self - this GitObjectFetcher instance - obj - the object name provided to push() - sha1 - the SHA1 of the object, if 'None' obj is missing - t - the type of the object (tag/commit/tree/blob) - size - the size of the object in bytes - data - the object contents """ self.queue.append((obj, callback)) self._submit_next_object() # (Re)start queue processing def process_next_entry (self): """Read the next entry off the queue and invoke callback.""" obj, cb = self.in_transit self.in_transit = None header = self.f.readline() if header == "%s missing\n" % (obj): cb(self, obj, None, None, None, None) return sha1, t, size = header.split(" ") assert len(sha1) == 40 assert t in ("tag", "commit", "tree", "blob") assert size.endswith("\n") size = int(size.strip()) data = self.f.read(size) assert self.f.read(1) == "\n" cb(self, obj, sha1, t, size, data) self._submit_next_object() def process (self): """Process the current queue until empty.""" while self.in_transit is not None: self.process_next_entry() # High-level convenience methods: def get_sha1 (self, objspec): """Return the SHA1 of the object specified by 'objspec'. Return None if 'objspec' does not specify an existing object. """ class _ObjHandler(object): """Helper class for getting the returned SHA1.""" def __init__ (self, parser): self.parser = parser self.sha1 = None def __call__ (self, parser, obj, sha1, t, size, data): # FIXME: Many unused arguments. Could this be cheaper? assert parser == self.parser self.sha1 = sha1 handler = _ObjHandler(self) self.push(objspec, handler) self.process() return handler.sha1 def open_obj (self, objspec): """Return a file object wrapping the contents of a named object. The caller is responsible for calling .close() on the returned file object. Raise KeyError if 'objspec' does not exist in the repo. """ class _ObjHandler(object): """Helper class for parsing the returned git object.""" def __init__ (self, parser): """Set up helper.""" self.parser = parser self.contents = StringIO() self.err = None def __call__ (self, parser, obj, sha1, t, size, data): """Git object callback (see GitObjectFetcher documentation).""" assert parser == self.parser if not sha1: # Missing object self.err = "Missing object '%s'" % obj else: assert size == len(data) self.contents.write(data) handler = _ObjHandler(self) self.push(objspec, handler) self.process() if handler.err: raise KeyError(handler.err) handler.contents.seek(0) return handler.contents def walk_tree (self, tree_objspec, callback, prefix = ""): """Recursively walk the given Git tree object. Recursively walk all subtrees of the given tree object, and invoke the given callback passing three arguments: (path, mode, data) with the path, permission bits, and contents of all the blobs found in the entire tree structure. """ class _ObjHandler(object): """Helper class for walking a git tree structure.""" def __init__ (self, parser, cb, path, mode = None): """Set up helper.""" self.parser = parser self.cb = cb self.path = path self.mode = mode self.err = None def parse_tree (self, treedata): """Parse tree object data, yield tree entries. Each tree entry is a 3-tuple (mode, sha1, path) self.path is prepended to all paths yielded from this method. 
""" while treedata: mode = int(treedata[:6], 10) # Turn 100xxx into xxx if mode > 100000: mode -= 100000 assert treedata[6] == " " i = treedata.find("\0", 7) assert i > 0 path = treedata[7:i] sha1 = hexlify(treedata[i + 1: i + 21]) yield (mode, sha1, self.path + path) treedata = treedata[i + 21:] def __call__ (self, parser, obj, sha1, t, size, data): """Git object callback (see GitObjectFetcher documentation).""" assert parser == self.parser if not sha1: # Missing object self.err = "Missing object '%s'" % (obj) return assert size == len(data) if t == "tree": if self.path: self.path += "/" # Recurse into all blobs and subtrees for m, s, p in self.parse_tree(data): parser.push(s, self.__class__(self.parser, self.cb, p, m)) elif t == "blob": self.cb(self.path, self.mode, data) else: raise ValueError("Unknown object type '%s'" % (t)) self.push(tree_objspec, _ObjHandler(self, callback, prefix)) self.process() class GitRefMap(object): """Map Git ref names to the Git object names they currently point to. Behaves like a dictionary of Git ref names -> Git object names. """ def __init__ (self, obj_fetcher): """Create a new Git ref -> object map.""" self.obj_fetcher = obj_fetcher self._cache = {} # dict: refname -> objname def _load (self, ref): """Retrieve the object currently bound to the given ref. The name of the object pointed to by the given ref is stored into this mapping, and also returned. """ if ref not in self._cache: self._cache[ref] = self.obj_fetcher.get_sha1(ref) return self._cache[ref] def __contains__ (self, refname): """Return True if the given refname is present in this cache.""" return bool(self._load(refname)) def __getitem__ (self, refname): """Return the git object name pointed to by the given refname.""" commit = self._load(refname) if commit is None: raise KeyError("Unknown ref '%s'" % (refname)) return commit def get (self, refname, default = None): """Return the git object name pointed to by the given refname.""" commit = self._load(refname) if commit is None: return default return commit class GitFICommit(object): """Encapsulate the data in a Git fast-import commit command.""" SHA1RE = re.compile(r'^[0-9a-f]{40}$') @classmethod def parse_mode (cls, mode): """Verify the given git file mode, and return it as a string.""" assert mode in (644, 755, 100644, 100755, 120000) return "%i" % (mode) @classmethod def parse_objname (cls, objname): """Return the given object name (or mark number) as a string.""" if isinstance(objname, int): # Object name is a mark number assert objname > 0 return ":%i" % (objname) # No existence check is done, only checks for valid format assert cls.SHA1RE.match(objname) # Object name is valid SHA1 return objname @classmethod def quote_path (cls, path): """Return a quoted version of the given path.""" path = path.replace("\\", "\\\\") path = path.replace("\n", "\\n") path = path.replace('"', '\\"') return '"%s"' % (path) @classmethod def parse_path (cls, path): """Verify that the given path is valid, and quote it, if needed.""" assert not isinstance(path, int) # Cannot be a mark number # These checks verify the rules on the fast-import man page assert not path.count("//") assert not path.endswith("/") assert not path.startswith("/") assert not path.count("/./") assert not path.count("/../") assert not path.endswith("/.") assert not path.endswith("/..") assert not path.startswith("./") assert not path.startswith("../") if path.count('"') + path.count('\n') + path.count('\\'): return cls.quote_path(path) return path def __init__ (self, name, email, 
timestamp, timezone, message): """Create a new Git fast-import commit, with the given metadata.""" self.name = name self.email = email self.timestamp = timestamp self.timezone = timezone self.message = message self.pathops = [] # List of path operations in this commit def modify (self, mode, blobname, path): """Add a file modification to this Git fast-import commit.""" self.pathops.append(("M", self.parse_mode(mode), self.parse_objname(blobname), self.parse_path(path))) def delete (self, path): """Add a file deletion to this Git fast-import commit.""" self.pathops.append(("D", self.parse_path(path))) def copy (self, path, newpath): """Add a file copy to this Git fast-import commit.""" self.pathops.append(("C", self.parse_path(path), self.parse_path(newpath))) def rename (self, path, newpath): """Add a file rename to this Git fast-import commit.""" self.pathops.append(("R", self.parse_path(path), self.parse_path(newpath))) def note (self, blobname, commit): """Add a note object to this Git fast-import commit.""" self.pathops.append(("N", self.parse_objname(blobname), self.parse_objname(commit))) def deleteall (self): """Delete all files in this Git fast-import commit.""" self.pathops.append("deleteall") class TestGitFICommit(unittest.TestCase): """GitFICommit selftests.""" def test_basic (self): """GitFICommit basic selftests.""" def expect_fail (method, data): """Verify that the method(data) raises an AssertionError.""" try: method(data) except AssertionError: return raise AssertionError("Failed test for invalid data '%s(%s)'" % (method.__name__, repr(data))) def test_parse_mode (self): """GitFICommit.parse_mode() selftests.""" self.assertEqual(GitFICommit.parse_mode(644), "644") self.assertEqual(GitFICommit.parse_mode(755), "755") self.assertEqual(GitFICommit.parse_mode(100644), "100644") self.assertEqual(GitFICommit.parse_mode(100755), "100755") self.assertEqual(GitFICommit.parse_mode(120000), "120000") self.assertRaises(AssertionError, GitFICommit.parse_mode, 0) self.assertRaises(AssertionError, GitFICommit.parse_mode, 123) self.assertRaises(AssertionError, GitFICommit.parse_mode, 600) self.assertRaises(AssertionError, GitFICommit.parse_mode, "644") self.assertRaises(AssertionError, GitFICommit.parse_mode, "abc") def test_parse_objname (self): """GitFICommit.parse_objname() selftests.""" self.assertEqual(GitFICommit.parse_objname(1), ":1") self.assertRaises(AssertionError, GitFICommit.parse_objname, 0) self.assertRaises(AssertionError, GitFICommit.parse_objname, -1) self.assertEqual(GitFICommit.parse_objname("0123456789" * 4), "0123456789" * 4) self.assertEqual(GitFICommit.parse_objname("2468abcdef" * 4), "2468abcdef" * 4) self.assertRaises(AssertionError, GitFICommit.parse_objname, "abcdefghij" * 4) def test_parse_path (self): """GitFICommit.parse_path() selftests.""" self.assertEqual(GitFICommit.parse_path("foo/bar"), "foo/bar") self.assertEqual(GitFICommit.parse_path("path/with\n and \" in it"), '"path/with\\n and \\" in it"') self.assertRaises(AssertionError, GitFICommit.parse_path, 1) self.assertRaises(AssertionError, GitFICommit.parse_path, 0) self.assertRaises(AssertionError, GitFICommit.parse_path, -1) self.assertRaises(AssertionError, GitFICommit.parse_path, "foo//bar") self.assertRaises(AssertionError, GitFICommit.parse_path, "foo/bar/") self.assertRaises(AssertionError, GitFICommit.parse_path, "/foo/bar") self.assertRaises(AssertionError, GitFICommit.parse_path, "foo/./bar") self.assertRaises(AssertionError, GitFICommit.parse_path, "foo/../bar") 
self.assertRaises(AssertionError, GitFICommit.parse_path, "foo/bar/.") self.assertRaises(AssertionError, GitFICommit.parse_path, "foo/bar/..") self.assertRaises(AssertionError, GitFICommit.parse_path, "./foo/bar") self.assertRaises(AssertionError, GitFICommit.parse_path, "../foo/bar") class GitFastImport(object): """Encapsulate communication with git fast-import.""" def __init__ (self, f, obj_fetcher, last_mark = 0): """Set up self to communicate with a fast-import process through f.""" self.f = f # File object where fast-import stream is written self.obj_fetcher = obj_fetcher # GitObjectFetcher instance self.next_mark = last_mark + 1 # Next mark number self.refs = set() # Keep track of the refnames we've seen def comment (self, s): """Write the given comment in the fast-import stream.""" assert "\n" not in s, "Malformed comment: '%s'" % (s) self.f.write("# %s\n" % (s)) def commit (self, ref, commitdata): """Make a commit on the given ref, with the given GitFICommit. Return the mark number identifying this commit. """ self.f.write("""\ commit %(ref)s mark :%(mark)i committer %(name)s <%(email)s> %(timestamp)i %(timezone)s data %(msgLength)i %(msg)s """ % { 'ref': ref, 'mark': self.next_mark, 'name': commitdata.name, 'email': commitdata.email, 'timestamp': commitdata.timestamp, 'timezone': commitdata.timezone, 'msgLength': len(commitdata.message), 'msg': commitdata.message, }) if ref not in self.refs: self.refs.add(ref) parent = ref + "^0" if self.obj_fetcher.get_sha1(parent): self.f.write("from %s\n" % (parent)) for op in commitdata.pathops: self.f.write(" ".join(op)) self.f.write("\n") self.f.write("\n") retval = self.next_mark self.next_mark += 1 return retval def blob (self, data): """Import the given blob. Return the mark number identifying this blob. """ self.f.write("blob\nmark :%i\ndata %i\n%s\n" % (self.next_mark, len(data), data)) retval = self.next_mark self.next_mark += 1 return retval def reset (self, ref, objname): """Reset the given ref to point at the given Git object.""" self.f.write("reset %s\nfrom %s\n\n" % (ref, GitFICommit.parse_objname(objname))) if ref not in self.refs: self.refs.add(ref) class GitNotes(object): """Encapsulate access to Git notes. Simulates a dictionary of object name (SHA1) -> Git note mappings. """ def __init__ (self, notes_ref, obj_fetcher): """Create a new Git notes interface, bound to the given notes ref.""" self.notes_ref = notes_ref self.obj_fetcher = obj_fetcher # Used to get objects from repo self.imports = [] # list: (objname, note data blob name) tuples def __del__ (self): """Verify that self.commit_notes() was called before destruction.""" if self.imports: error("Missing call to self.commit_notes().") error("%i notes are not committed!", len(self.imports)) def _load (self, objname): """Return the note data associated with the given git object. The note data is returned in string form. If no note is found for the given object, None is returned. """ try: f = self.obj_fetcher.open_obj("%s:%s" % (self.notes_ref, objname)) ret = f.read() f.close() except KeyError: ret = None return ret def __getitem__ (self, objname): """Return the note contents associated with the given object. Raise KeyError if given object has no associated note. """ blobdata = self._load(objname) if blobdata is None: raise KeyError("Object '%s' has no note" % (objname)) return blobdata def get (self, objname, default = None): """Return the note contents associated with the given object. Return given default if given object has no associated note. 
""" blobdata = self._load(objname) if blobdata is None: return default return blobdata def import_note (self, objname, data, gfi): """Tell git fast-import to store data as a note for objname. This method uses the given GitFastImport object to create a blob containing the given note data. Also an entry mapping the given object name to the created blob is stored until commit_notes() is called. Note that this method only works if it is later followed by a call to self.commit_notes() (which produces the note commit that refers to the blob produced here). """ if not data.endswith("\n"): data += "\n" gfi.comment("Importing note for object %s" % (objname)) mark = gfi.blob(data) self.imports.append((objname, mark)) def commit_notes (self, gfi, author, message): """Produce a git fast-import note commit for the imported notes. This method uses the given GitFastImport object to create a commit on the notes ref, introducing the notes previously submitted to import_note(). """ if not self.imports: return commitdata = GitFICommit(author[0], author[1], time.time(), "0000", message) for objname, blobname in self.imports: assert isinstance(objname, int) and objname > 0 assert isinstance(blobname, int) and blobname > 0 commitdata.note(blobname, objname) gfi.commit(self.notes_ref, commitdata) self.imports = [] class GitCachedNotes(GitNotes): """Encapsulate access to Git notes (cached version). Only use this class if no caching is done at a higher level. Simulates a dictionary of object name (SHA1) -> Git note mappings. """ def __init__ (self, notes_ref, obj_fetcher): """Set up a caching wrapper around GitNotes.""" GitNotes.__init__(self, notes_ref, obj_fetcher) self._cache = {} # Cache: object name -> note data def __del__ (self): """Verify that GitNotes' destructor is called.""" GitNotes.__del__(self) def _load (self, objname): """Extend GitNotes._load() with a local objname -> note cache.""" if objname not in self._cache: self._cache[objname] = GitNotes._load(self, objname) return self._cache[objname] def import_note (self, objname, data, gfi): """Extend GitNotes.import_note() with a local objname -> note cache.""" if not data.endswith("\n"): data += "\n" assert objname not in self._cache self._cache[objname] = data GitNotes.import_note(self, objname, data, gfi) if __name__ == '__main__': unittest.main()
gpl-2.0
KerkhoffTechnologies/django-connectwise
djconnectwise/tests/test_commands.py
1
24129
import io from django.core.management import call_command from django.test import TestCase from djconnectwise import models from . import mocks from . import fixtures from . import fixture_utils from .. import sync def sync_summary(class_name, created_count): return '{} Sync Summary - Created: {}, Updated: 0, Skipped: 0'.format( class_name, created_count ) def full_sync_summary(class_name, deleted_count): return '{} Sync Summary - Created: 0, Updated: 0, Skipped: 0, ' \ 'Deleted: {}'.format(class_name, deleted_count) def slug_to_title(slug): return slug.title().replace('_', ' ') class AbstractBaseSyncTest(object): def _test_sync(self, mock_call, return_value, cw_object, full_option=False): mock_call(return_value) out = io.StringIO() args = ['cwsync', cw_object] if full_option: args.append('--full') call_command(*args, stdout=out) return out def _title_for_cw_object(self, cw_object): return cw_object.title().replace('_', ' ') def test_sync(self): out = self._test_sync(*self.args) obj_title = self._title_for_cw_object(self.args[-1]) self.assertIn(obj_title, out.getvalue().strip()) def test_full_sync(self): self.test_sync() mock_call, return_value, cw_object = self.args args = [ mock_call, [], cw_object ] out = self._test_sync(*args, full_option=True) obj_label = self._title_for_cw_object(cw_object) msg_tmpl = '{} Sync Summary - Created: 0, Updated: 0, Skipped: 0, ' \ 'Deleted: {}' msg = msg_tmpl.format(obj_label, len(return_value)) self.assertEqual(msg, out.getvalue().strip()) class TestSyncCalendarsCommand(AbstractBaseSyncTest, TestCase): args = ( mocks.schedule_api_get_calendars_call, fixtures.API_SCHEDULE_CALENDAR_LIST, 'calendar' ) class TestSyncHolidayCommand(AbstractBaseSyncTest, TestCase): args = ( mocks.schedule_api_get_holidays_call, fixtures.API_SCHEDULE_HOLIDAY_MODEL_LIST, 'holiday' ) def setUp(self): fixture_utils.init_holiday_lists() class TestSyncHolidayListCommand(AbstractBaseSyncTest, TestCase): args = ( mocks.schedule_api_get_holiday_lists_call, fixtures.API_SCHEDULE_HOLIDAY_LIST_LIST, 'holiday_list' ) class TestSyncCompanyStatusesCommand(AbstractBaseSyncTest, TestCase): args = ( mocks.company_api_get_company_statuses_call, fixtures.API_COMPANY_STATUS_LIST, 'company_status', ) class TestSyncTerritoriesCommand(AbstractBaseSyncTest, TestCase): args = ( mocks.system_api_get_territories_call, fixtures.API_SYSTEM_TERRITORY_LIST, 'territory', ) class TestSyncContactsCommand(AbstractBaseSyncTest, TestCase): def setUp(self): super().setUp() fixture_utils.init_contacts() args = ( mocks.company_api_get_contacts, fixtures.API_COMPANY_CONTACT_LIST, 'contact', ) class TestSyncCommunicationTypesCommand(AbstractBaseSyncTest, TestCase): def setUp(self): super().setUp() fixture_utils.init_communication_types() args = ( mocks.company_api_get_communication_types, fixtures.API_COMMUNICATION_TYPE_LIST, 'communication_type', ) class TestSyncContactCommunicationsCommand(AbstractBaseSyncTest, TestCase): def setUp(self): super().setUp() fixture_utils.init_contacts() fixture_utils.init_contact_communications() args = ( mocks.company_api_get_contact_communications, fixtures.API_CONTACT_COMMUNICATION_LIST, 'contact_communication', ) class TestSyncCompaniesCommand(AbstractBaseSyncTest, TestCase): def setUp(self): super().setUp() fixture_utils.init_territories() args = ( mocks.company_api_get_call, fixtures.API_COMPANY_LIST, 'company', ) class TestSyncCompanyTypesCommand(AbstractBaseSyncTest, TestCase): args = ( mocks.company_api_get_company_types_call, fixtures.API_COMPANY_TYPES_LIST, 'company_type' ) 
class TestSyncTeamsCommand(AbstractBaseSyncTest, TestCase): args = ( mocks.service_api_get_teams_call, fixtures.API_SERVICE_TEAM_LIST, 'team', ) def setUp(self): super().setUp() fixture_utils.init_boards() class TestSyncBoardsCommand(AbstractBaseSyncTest, TestCase): args = ( mocks.service_api_get_boards_call, fixtures.API_BOARD_LIST, 'board', ) class TestSyncLocationsCommand(AbstractBaseSyncTest, TestCase): args = ( mocks.service_api_get_locations_call, fixtures.API_SERVICE_LOCATION_LIST, 'location', ) class TestSyncMyCompanyOtherCommand(AbstractBaseSyncTest, TestCase): args = ( mocks.system_api_get_other_call, fixtures.API_SYSTEM_OTHER_LIST, 'company_other', ) def setUp(self): fixture_utils.init_calendars() class TestSyncPrioritiesCommand(AbstractBaseSyncTest, TestCase): args = ( mocks.service_api_get_priorities_call, fixtures.API_SERVICE_PRIORITY_LIST, 'priority', ) class TestSyncProjectStatusesCommand(AbstractBaseSyncTest, TestCase): args = ( mocks.projects_api_get_project_statuses_call, fixtures.API_PROJECT_STATUSES, 'project_status', ) class TestSyncProjectTypesCommand(AbstractBaseSyncTest, TestCase): args = ( mocks.projects_api_get_project_types_call, fixtures.API_PROJECT_TYPES, 'project_type', ) class TestSyncProjectsCommand(AbstractBaseSyncTest, TestCase): args = ( mocks.project_api_get_projects_call, fixtures.API_PROJECT_LIST, 'project', ) def setUp(self): super().setUp() fixture_utils.init_project_statuses() class TestSyncProjectPhaseCommand(AbstractBaseSyncTest, TestCase): args = ( mocks.projects_api_get_project_phases_call, fixtures.API_PROJECT_PHASE_LIST, 'project_phase' ) def setUp(self): super().setUp() fixture_utils.init_projects() fixture_utils.init_project_phases() class TestSyncProjectTeamMemberCommand(AbstractBaseSyncTest, TestCase): args = ( mocks.project_api_get_team_members_call, fixtures.API_PROJECT_TEAM_MEMBER_LIST, 'project_team_member' ) def setUp(self): super().setUp() mocks.system_api_get_member_image_by_photo_id_call( (mocks.CW_MEMBER_IMAGE_FILENAME, mocks.get_member_avatar())) fixture_utils.init_members() fixture_utils.init_work_roles() fixture_utils.init_project_statuses() fixture_utils.init_projects() fixture_utils.init_project_team_members() class TestSyncBoardsStatusesCommand(AbstractBaseSyncTest, TestCase): args = ( mocks.service_api_get_statuses_call, fixtures.API_BOARD_STATUS_LIST, 'board_status', ) def setUp(self): board_synchronizer = sync.BoardSynchronizer() models.ConnectWiseBoard.objects.all().delete() _, _patch = mocks.service_api_get_boards_call(fixtures.API_BOARD_LIST) board_synchronizer.sync() _patch.stop() class TestSyncSLAsCommand(AbstractBaseSyncTest, TestCase): args = ( mocks.service_api_get_slas_call, fixtures.API_SERVICE_SLA_LIST, 'sla', ) def setUp(self): fixture_utils.init_calendars() class TestSyncSLAPrioritiesCommand(AbstractBaseSyncTest, TestCase): args = ( mocks.service_api_get_sla_priorities_call, fixtures.API_SERVICE_SLA_PRIORITY_LIST, 'sla_priority' ) def setUp(self): fixture_utils.init_calendars() fixture_utils.init_slas() fixture_utils.init_priorities() class TestSyncServiceNotesCommand(AbstractBaseSyncTest, TestCase): args = ( mocks.service_api_get_notes_call, fixtures.API_SERVICE_NOTE_LIST, 'service_note' ) def setUp(self): super().setUp() fixture_utils.init_service_notes() fixture_utils.init_members() fixture_utils.init_territories() fixture_utils.init_companies() fixture_utils.init_boards() fixture_utils.init_board_statuses() fixture_utils.init_tickets() class TestSyncOpportunityNotesCommand(AbstractBaseSyncTest, 
TestCase): args = ( mocks.sales_api_get_opportunity_notes_call, fixtures.API_SALES_OPPORTUNITY_NOTE_LIST, 'opportunity_note' ) def setUp(self): super().setUp() fixture_utils.init_opportunity_statuses() fixture_utils.init_opportunity_stages() fixture_utils.init_opportunity_types() fixture_utils.init_opportunities() fixture_utils.init_opportunity_notes() class TestSyncOpportunityCommand(AbstractBaseSyncTest, TestCase): args = ( mocks.sales_api_get_opportunities_call, [fixtures.API_SALES_OPPORTUNITY], 'opportunity', ) def setUp(self): super().setUp() fixture_utils.init_territories() fixture_utils.init_companies() fixture_utils.init_members() fixture_utils.init_opportunity_statuses() fixture_utils.init_opportunity_stages() fixture_utils.init_opportunity_types() class TestSyncOpportunityStagesCommand(AbstractBaseSyncTest, TestCase): args = ( mocks.sales_api_get_opportunity_stages_call, fixtures.API_SALES_OPPORTUNITY_STAGES, 'opportunity_stage' ) class TestSyncOpportunityStatusesCommand(AbstractBaseSyncTest, TestCase): args = ( mocks.sales_api_get_opportunity_statuses_call, fixtures.API_SALES_OPPORTUNITY_STATUSES, 'opportunity_status', ) class TestSyncOpportunityTypesCommand(AbstractBaseSyncTest, TestCase): args = ( mocks.sales_api_get_opportunity_types_call, fixtures.API_SALES_OPPORTUNITY_TYPES, 'opportunity_type', ) class TestSyncSalesProbabilitiesCommand(AbstractBaseSyncTest, TestCase): args = ( mocks.sales_api_get_sales_probabilities_call, fixtures.API_SALES_PROBABILITY_LIST, 'sales_probability' ) class TestSyncTimeEntriesCommand(AbstractBaseSyncTest, TestCase): args = ( mocks.time_api_get_time_entries_call, fixtures.API_TIME_ENTRY_LIST, 'time_entry' ) def setUp(self): super().setUp() fixture_utils.init_boards() fixture_utils.init_board_statuses() fixture_utils.init_tickets() fixture_utils.init_territories() fixture_utils.init_companies() fixture_utils.init_members() class TestSyncScheduleEntriesCommand(AbstractBaseSyncTest, TestCase): args = ( mocks.schedule_api_get_schedule_entries_call, fixtures.API_SCHEDULE_ENTRIES, 'schedule_entry' ) def setUp(self): super().setUp() fixture_utils.init_boards() fixture_utils.init_board_statuses() fixture_utils.init_territories() fixture_utils.init_companies() fixture_utils.init_locations() fixture_utils.init_teams() fixture_utils.init_members() fixture_utils.init_priorities() fixture_utils.init_projects() fixture_utils.init_schedule_types() fixture_utils.init_schedule_statuses() fixture_utils.init_tickets() class TestSyncScheduleTypesCommand(AbstractBaseSyncTest, TestCase): args = ( mocks.schedule_api_get_schedule_types_call, fixtures.API_SCHEDULE_TYPE_LIST, 'schedule_type' ) class TestSyncScheduleStatusesCommand(AbstractBaseSyncTest, TestCase): args = ( mocks.schedule_api_get_schedule_statuses_call, fixtures.API_SCHEDULE_STATUS_LIST, 'schedule_status' ) class TestSyncActivityStatusesCommand(AbstractBaseSyncTest, TestCase): args = ( mocks.sales_api_get_activities_statuses_call, fixtures.API_SALES_ACTIVITY_STATUSES, 'activity_status', ) class TestSyncActivityTypesCommand(AbstractBaseSyncTest, TestCase): args = ( mocks.sales_api_get_activities_types_call, fixtures.API_SALES_ACTIVITY_TYPES, 'activity_type', ) class TestSyncActivityCommand(AbstractBaseSyncTest, TestCase): args = ( mocks.sales_api_get_activities_call, fixtures.API_SALES_ACTIVITIES, 'activity' ) def setUp(self): super().setUp() fixture_utils.init_territories() fixture_utils.init_companies() mocks.system_api_get_member_image_by_photo_id_call( (mocks.CW_MEMBER_IMAGE_FILENAME, 
mocks.get_member_avatar())) fixture_utils.init_members() fixture_utils.init_opportunity_statuses() fixture_utils.init_opportunity_types() fixture_utils.init_opportunities() fixture_utils.init_tickets() fixture_utils.init_board_statuses() fixture_utils.init_activities() class TestSyncTypeCommand(AbstractBaseSyncTest, TestCase): def setUp(self): super().setUp() fixture_utils.init_types() fixture_utils.init_boards() args = ( mocks.service_api_get_types_call, fixtures.API_TYPE_LIST, 'type' ) class TestSyncSubTypeCommand(AbstractBaseSyncTest, TestCase): def setUp(self): super().setUp() fixture_utils.init_types() fixture_utils.init_boards() args = ( mocks.service_api_get_subtypes_call, fixtures.API_SUBTYPE_LIST, 'sub_type' ) class TestSyncItemCommand(AbstractBaseSyncTest, TestCase): def setUp(self): super().setUp() fixture_utils.init_types() fixture_utils.init_boards() args = ( mocks.service_api_get_items_call, fixtures.API_ITEM_LIST, 'item' ) class TestSyncTypeSubTypeItemAssociationCommand(AbstractBaseSyncTest, TestCase): def setUp(self): super().setUp() fixture_utils.init_types() fixture_utils.init_boards() args = ( mocks.service_api_get_type_subtype_item_associations_call, fixtures.API_TYPE_SUBTYPE_ITEM_ASSOCIATION_LIST, 'type_subtype_item_association' ) class TestSyncWorkTypeCommand(AbstractBaseSyncTest, TestCase): def setUp(self): super().setUp() fixture_utils.init_work_types() args = ( mocks.time_api_get_work_types_call, fixtures.API_WORK_TYPE_LIST, 'work_type' ) class TestSyncWorkRoleCommand(AbstractBaseSyncTest, TestCase): def setUp(self): super().setUp() fixture_utils.init_work_roles() args = ( mocks.time_api_get_work_roles_call, fixtures.API_WORK_ROLE_LIST, 'work_role' ) class TestSyncAgreementCommand(AbstractBaseSyncTest, TestCase): def setUp(self): super().setUp() fixture_utils.init_agreements() args = ( mocks.finance_api_get_agreements_call, fixtures.API_AGREEMENT_LIST, 'agreement' ) class TestSyncProjectTicketCommand(AbstractBaseSyncTest, TestCase): def setUp(self): super().setUp() mocks.system_api_get_member_image_by_photo_id_call( (mocks.CW_MEMBER_IMAGE_FILENAME, mocks.get_member_avatar())) fixture_utils.init_members() fixture_utils.init_board_statuses() fixture_utils.init_project_tickets() args = ( mocks.project_api_tickets_test_command, fixtures.API_PROJECT_TICKET_LIST, 'project_ticket' ) class TestSyncAllCommand(TestCase): def setUp(self): super().setUp() mocks.system_api_get_members_call([fixtures.API_MEMBER]) mocks.system_api_get_member_image_by_photo_id_call( (mocks.CW_MEMBER_IMAGE_FILENAME, mocks.get_member_avatar())) mocks.company_api_by_id_call(fixtures.API_COMPANY) mocks.service_api_tickets_call() sync_test_cases = [ TestSyncCompanyStatusesCommand, TestSyncContactsCommand, TestSyncCommunicationTypesCommand, TestSyncContactCommunicationsCommand, TestSyncCompaniesCommand, TestSyncCompanyTypesCommand, TestSyncLocationsCommand, TestSyncPrioritiesCommand, TestSyncProjectPhaseCommand, TestSyncProjectStatusesCommand, TestSyncProjectsCommand, TestSyncTeamsCommand, TestSyncBoardsStatusesCommand, TestSyncBoardsCommand, TestSyncServiceNotesCommand, TestSyncOpportunityNotesCommand, TestSyncOpportunityStatusesCommand, TestSyncOpportunityStagesCommand, TestSyncOpportunityTypesCommand, TestSyncOpportunityCommand, TestSyncSalesProbabilitiesCommand, TestSyncActivityStatusesCommand, TestSyncActivityTypesCommand, TestSyncActivityCommand, TestSyncScheduleTypesCommand, TestSyncScheduleStatusesCommand, TestSyncScheduleEntriesCommand, TestSyncTimeEntriesCommand, TestSyncTerritoriesCommand, 
TestSyncSLAsCommand, TestSyncCalendarsCommand, TestSyncSLAPrioritiesCommand, TestSyncMyCompanyOtherCommand, TestSyncHolidayCommand, TestSyncHolidayListCommand, TestSyncTypeCommand, TestSyncSubTypeCommand, TestSyncItemCommand, TestSyncTypeSubTypeItemAssociationCommand, TestSyncWorkTypeCommand, TestSyncWorkRoleCommand, TestSyncAgreementCommand, TestSyncProjectTypesCommand, TestSyncProjectTeamMemberCommand ] self.test_args = [] for test_case in sync_test_cases: self.test_args.append(test_case.args) apicall, fixture, cw_object = test_case.args apicall(fixture) def run_sync_command(self, full_option=False): out = io.StringIO() args = ['cwsync'] if full_option: args.append('--full') call_command(*args, stdout=out) return out.getvalue().strip() def test_partial_sync(self): """ Test the command to run a sync of all objects without the --full argument. """ output = self.run_sync_command() for apicall, fixture, cw_object in self.test_args: summary = sync_summary(slug_to_title(cw_object), len(fixture)) self.assertIn(summary, output) self.assertEqual(models.Team.objects.all().count(), len(fixtures.API_SERVICE_TEAM_LIST)) self.assertEqual(models.Contact.objects.all().count(), len(fixtures.API_COMPANY_CONTACT_LIST)) self.assertEqual(models.ContactCommunication.objects.all().count(), len(fixtures.API_CONTACT_COMMUNICATION_LIST)) self.assertEqual(models.CompanyStatus.objects.all().count(), len(fixtures.API_COMPANY_STATUS_LIST)) self.assertEqual(models.TicketPriority.objects.all().count(), len(fixtures.API_SERVICE_PRIORITY_LIST)) self.assertEqual(models.ConnectWiseBoard.objects.all().count(), len(fixtures.API_BOARD_LIST)) self.assertEqual(models.BoardStatus.objects.all().count(), len(fixtures.API_BOARD_STATUS_LIST)) self.assertEqual(models.Location.objects.all().count(), 1) self.assertEqual(models.Ticket.objects.all().count(), len([fixtures.API_SERVICE_TICKET])) def test_full_sync(self): """Test the command to run a full sync of all objects.""" cw_object_map = { 'member': models.Member, 'board': models.ConnectWiseBoard, 'priority': models.TicketPriority, 'project_status': models.ProjectStatus, 'project': models.Project, 'project_phase': models.ProjectPhase, 'board_status': models.BoardStatus, 'territory': models.Territory, 'company_status': models.CompanyStatus, 'company_type': models.CompanyType, 'team': models.Team, 'location': models.Location, 'ticket': models.Ticket, 'service_note': models.ServiceNote, 'opportunity_note': models.OpportunityNote, 'contact': models.Contact, 'communication_type': models.CommunicationType, 'contact_communication': models.ContactCommunication, 'company': models.Company, 'opportunity': models.Opportunity, 'opportunity_status': models.OpportunityStatus, 'opportunity_stage': models.OpportunityStage, 'opportunity_type': models.OpportunityType, 'sales_probability': models.SalesProbability, 'activity_status': models.ActivityStatus, 'activity_type': models.ActivityType, 'activity': models.Activity, 'schedule_entry': models.ScheduleEntry, 'schedule_type': models.ScheduleType, 'schedule_status': models.ScheduleStatus, 'time_entry': models.TimeEntry, 'sla': models.Sla, 'calendar': models.Calendar, 'sla_priority': models.SlaPriority, 'company_other': models.MyCompanyOther, 'holiday': models.Holiday, 'holiday_list': models.HolidayList, 'type': models.Type, 'sub_type': models.SubType, 'item': models.Item, 'type_subtype_item_association': models.TypeSubTypeItemAssociation, 'work_type': models.WorkType, 'work_role': models.WorkRole, 'agreement': models.Agreement, 'project_type': 
models.ProjectType, 'project_team_member': models.ProjectTeamMember, } # Run partial sync first self.run_sync_command() fixture_utils.init_projects() fixture_utils.init_members() fixture_utils.init_board_statuses() fixture_utils.init_teams() pre_full_sync_counts = {} for key, model_class in cw_object_map.items(): pre_full_sync_counts[key] = model_class.objects.all().count() mocks.system_api_get_members_call([]) mocks.company_api_by_id_call([]) for apicall, _, _ in self.test_args: apicall([]) output = self.run_sync_command(full_option=True) for apicall, fixture, cw_object in self.test_args: if cw_object in ( 'team', 'service_note', 'opportunity_note', 'sla_priority', 'holiday', ): # Assert that there were objects to get deleted, then change # to zero to verify the output formats correctly. # We are just testing the command, there are sync tests to # verify that the syncronizers work correctly self.assertGreater(pre_full_sync_counts[cw_object], 0) pre_full_sync_counts[cw_object] = 0 summary = full_sync_summary( slug_to_title(cw_object), pre_full_sync_counts[cw_object] ) self.assertIn(summary, output) class TestListUsersCommand(TestCase): def test_command(self): # We don't need to check output carefully. Just verify it # doesn't explode. self.assertEqual( call_command('list_users'), None )
mit
yqm/sl4a
python/src/Lib/test/test_hmac.py
58
13069
import hmac import hashlib import unittest import warnings from test import test_support class TestVectorsTestCase(unittest.TestCase): def test_md5_vectors(self): # Test the HMAC module against test vectors from the RFC. def md5test(key, data, digest): h = hmac.HMAC(key, data) self.assertEqual(h.hexdigest().upper(), digest.upper()) md5test(chr(0x0b) * 16, "Hi There", "9294727A3638BB1C13F48EF8158BFC9D") md5test("Jefe", "what do ya want for nothing?", "750c783e6ab0b503eaa86e310a5db738") md5test(chr(0xAA)*16, chr(0xDD)*50, "56be34521d144c88dbb8c733f0e8b3f6") md5test("".join([chr(i) for i in range(1, 26)]), chr(0xCD) * 50, "697eaf0aca3a3aea3a75164746ffaa79") md5test(chr(0x0C) * 16, "Test With Truncation", "56461ef2342edc00f9bab995690efd4c") md5test(chr(0xAA) * 80, "Test Using Larger Than Block-Size Key - Hash Key First", "6b1ab7fe4bd7bf8f0b62e6ce61b9d0cd") md5test(chr(0xAA) * 80, ("Test Using Larger Than Block-Size Key " "and Larger Than One Block-Size Data"), "6f630fad67cda0ee1fb1f562db3aa53e") def test_sha_vectors(self): def shatest(key, data, digest): h = hmac.HMAC(key, data, digestmod=hashlib.sha1) self.assertEqual(h.hexdigest().upper(), digest.upper()) shatest(chr(0x0b) * 20, "Hi There", "b617318655057264e28bc0b6fb378c8ef146be00") shatest("Jefe", "what do ya want for nothing?", "effcdf6ae5eb2fa2d27416d5f184df9c259a7c79") shatest(chr(0xAA)*20, chr(0xDD)*50, "125d7342b9ac11cd91a39af48aa17b4f63f175d3") shatest("".join([chr(i) for i in range(1, 26)]), chr(0xCD) * 50, "4c9007f4026250c6bc8414f9bf50c86c2d7235da") shatest(chr(0x0C) * 20, "Test With Truncation", "4c1a03424b55e07fe7f27be1d58bb9324a9a5a04") shatest(chr(0xAA) * 80, "Test Using Larger Than Block-Size Key - Hash Key First", "aa4ae5e15272d00e95705637ce8a3b55ed402112") shatest(chr(0xAA) * 80, ("Test Using Larger Than Block-Size Key " "and Larger Than One Block-Size Data"), "e8e99d0f45237d786d6bbaa7965c7808bbff1a91") def _rfc4231_test_cases(self, hashfunc): def hmactest(key, data, hexdigests): h = hmac.HMAC(key, data, digestmod=hashfunc) self.assertEqual(h.hexdigest().lower(), hexdigests[hashfunc]) # 4.2. Test Case 1 hmactest(key = '\x0b'*20, data = 'Hi There', hexdigests = { hashlib.sha224: '896fb1128abbdf196832107cd49df33f' '47b4b1169912ba4f53684b22', hashlib.sha256: 'b0344c61d8db38535ca8afceaf0bf12b' '881dc200c9833da726e9376c2e32cff7', hashlib.sha384: 'afd03944d84895626b0825f4ab46907f' '15f9dadbe4101ec682aa034c7cebc59c' 'faea9ea9076ede7f4af152e8b2fa9cb6', hashlib.sha512: '87aa7cdea5ef619d4ff0b4241a1d6cb0' '2379f4e2ce4ec2787ad0b30545e17cde' 'daa833b7d6b8a702038b274eaea3f4e4' 'be9d914eeb61f1702e696c203a126854', }) # 4.3. Test Case 2 hmactest(key = 'Jefe', data = 'what do ya want for nothing?', hexdigests = { hashlib.sha224: 'a30e01098bc6dbbf45690f3a7e9e6d0f' '8bbea2a39e6148008fd05e44', hashlib.sha256: '5bdcc146bf60754e6a042426089575c7' '5a003f089d2739839dec58b964ec3843', hashlib.sha384: 'af45d2e376484031617f78d2b58a6b1b' '9c7ef464f5a01b47e42ec3736322445e' '8e2240ca5e69e2c78b3239ecfab21649', hashlib.sha512: '164b7a7bfcf819e2e395fbe73b56e0a3' '87bd64222e831fd610270cd7ea250554' '9758bf75c05a994a6d034f65f8f0e6fd' 'caeab1a34d4a6b4b636e070a38bce737', }) # 4.4. 
Test Case 3 hmactest(key = '\xaa'*20, data = '\xdd'*50, hexdigests = { hashlib.sha224: '7fb3cb3588c6c1f6ffa9694d7d6ad264' '9365b0c1f65d69d1ec8333ea', hashlib.sha256: '773ea91e36800e46854db8ebd09181a7' '2959098b3ef8c122d9635514ced565fe', hashlib.sha384: '88062608d3e6ad8a0aa2ace014c8a86f' '0aa635d947ac9febe83ef4e55966144b' '2a5ab39dc13814b94e3ab6e101a34f27', hashlib.sha512: 'fa73b0089d56a284efb0f0756c890be9' 'b1b5dbdd8ee81a3655f83e33b2279d39' 'bf3e848279a722c806b485a47e67c807' 'b946a337bee8942674278859e13292fb', }) # 4.5. Test Case 4 hmactest(key = ''.join([chr(x) for x in xrange(0x01, 0x19+1)]), data = '\xcd'*50, hexdigests = { hashlib.sha224: '6c11506874013cac6a2abc1bb382627c' 'ec6a90d86efc012de7afec5a', hashlib.sha256: '82558a389a443c0ea4cc819899f2083a' '85f0faa3e578f8077a2e3ff46729665b', hashlib.sha384: '3e8a69b7783c25851933ab6290af6ca7' '7a9981480850009cc5577c6e1f573b4e' '6801dd23c4a7d679ccf8a386c674cffb', hashlib.sha512: 'b0ba465637458c6990e5a8c5f61d4af7' 'e576d97ff94b872de76f8050361ee3db' 'a91ca5c11aa25eb4d679275cc5788063' 'a5f19741120c4f2de2adebeb10a298dd', }) # 4.7. Test Case 6 hmactest(key = '\xaa'*131, data = 'Test Using Larger Than Block-Siz' 'e Key - Hash Key First', hexdigests = { hashlib.sha224: '95e9a0db962095adaebe9b2d6f0dbce2' 'd499f112f2d2b7273fa6870e', hashlib.sha256: '60e431591ee0b67f0d8a26aacbf5b77f' '8e0bc6213728c5140546040f0ee37f54', hashlib.sha384: '4ece084485813e9088d2c63a041bc5b4' '4f9ef1012a2b588f3cd11f05033ac4c6' '0c2ef6ab4030fe8296248df163f44952', hashlib.sha512: '80b24263c7c1a3ebb71493c1dd7be8b4' '9b46d1f41b4aeec1121b013783f8f352' '6b56d037e05f2598bd0fd2215d6a1e52' '95e64f73f63f0aec8b915a985d786598', }) # 4.8. Test Case 7 hmactest(key = '\xaa'*131, data = 'This is a test using a larger th' 'an block-size key and a larger t' 'han block-size data. The key nee' 'ds to be hashed before being use' 'd by the HMAC algorithm.', hexdigests = { hashlib.sha224: '3a854166ac5d9f023f54d517d0b39dbd' '946770db9c2b95c9f6f565d1', hashlib.sha256: '9b09ffa71b942fcb27635fbcd5b0e944' 'bfdc63644f0713938a7f51535c3a35e2', hashlib.sha384: '6617178e941f020d351e2f254e8fd32c' '602420feb0b8fb9adccebb82461e99c5' 'a678cc31e799176d3860e6110c46523e', hashlib.sha512: 'e37b6a775dc87dbaa4dfa9f96e5e3ffd' 'debd71f8867289865df5a32d20cdc944' 'b6022cac3c4982b10d5eeb55c3e4de15' '134676fb6de0446065c97440fa8c6a58', }) def test_sha224_rfc4231(self): self._rfc4231_test_cases(hashlib.sha224) def test_sha256_rfc4231(self): self._rfc4231_test_cases(hashlib.sha256) def test_sha384_rfc4231(self): self._rfc4231_test_cases(hashlib.sha384) def test_sha512_rfc4231(self): self._rfc4231_test_cases(hashlib.sha512) def test_legacy_block_size_warnings(self): class MockCrazyHash(object): """Ain't no block_size attribute here.""" def __init__(self, *args): self._x = hashlib.sha1(*args) self.digest_size = self._x.digest_size def update(self, v): self._x.update(v) def digest(self): return self._x.digest() with warnings.catch_warnings(): warnings.simplefilter('error', RuntimeWarning) try: hmac.HMAC('a', 'b', digestmod=MockCrazyHash) except RuntimeWarning: pass else: self.fail('Expected warning about missing block_size') MockCrazyHash.block_size = 1 try: hmac.HMAC('a', 'b', digestmod=MockCrazyHash) except RuntimeWarning: pass else: self.fail('Expected warning about small block_size') class ConstructorTestCase(unittest.TestCase): def test_normal(self): # Standard constructor call. 
failed = 0 try: h = hmac.HMAC("key") except: self.fail("Standard constructor call raised exception.") def test_withtext(self): # Constructor call with text. try: h = hmac.HMAC("key", "hash this!") except: self.fail("Constructor call with text argument raised exception.") def test_withmodule(self): # Constructor call with text and digest module. try: h = hmac.HMAC("key", "", hashlib.sha1) except: self.fail("Constructor call with hashlib.sha1 raised exception.") class SanityTestCase(unittest.TestCase): def test_default_is_md5(self): # Testing if HMAC defaults to MD5 algorithm. # NOTE: this whitebox test depends on the hmac class internals h = hmac.HMAC("key") self.failUnless(h.digest_cons == hashlib.md5) def test_exercise_all_methods(self): # Exercising all methods once. # This must not raise any exceptions try: h = hmac.HMAC("my secret key") h.update("compute the hash of this text!") dig = h.digest() dig = h.hexdigest() h2 = h.copy() except: self.fail("Exception raised during normal usage of HMAC class.") class CopyTestCase(unittest.TestCase): def test_attributes(self): # Testing if attributes are of same type. h1 = hmac.HMAC("key") h2 = h1.copy() self.failUnless(h1.digest_cons == h2.digest_cons, "digest constructors don't match.") self.failUnless(type(h1.inner) == type(h2.inner), "Types of inner don't match.") self.failUnless(type(h1.outer) == type(h2.outer), "Types of outer don't match.") def test_realcopy(self): # Testing if the copy method created a real copy. h1 = hmac.HMAC("key") h2 = h1.copy() # Using id() in case somebody has overridden __cmp__. self.failUnless(id(h1) != id(h2), "No real copy of the HMAC instance.") self.failUnless(id(h1.inner) != id(h2.inner), "No real copy of the attribute 'inner'.") self.failUnless(id(h1.outer) != id(h2.outer), "No real copy of the attribute 'outer'.") def test_equality(self): # Testing if the copy has the same digests. h1 = hmac.HMAC("key") h1.update("some random text") h2 = h1.copy() self.failUnless(h1.digest() == h2.digest(), "Digest of copy doesn't match original digest.") self.failUnless(h1.hexdigest() == h2.hexdigest(), "Hexdigest of copy doesn't match original hexdigest.") def test_main(): test_support.run_unittest( TestVectorsTestCase, ConstructorTestCase, SanityTestCase, CopyTestCase ) if __name__ == "__main__": test_main()
apache-2.0
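A minimal sketch of how the RFC 2202 vectors exercised in the test module above can be reproduced with the standard library alone. It is illustrative only and is not part of the original file; it assumes a Python 3 interpreter, so keys and messages are passed as bytes rather than the str literals used in the Python 2 tests above.

import hashlib
import hmac

# HMAC-MD5 over RFC 2202 test case 2 (the "Jefe" vector used in md5test above).
key = b"Jefe"
msg = b"what do ya want for nothing?"
assert hmac.new(key, msg, digestmod=hashlib.md5).hexdigest() == \
    "750c783e6ab0b503eaa86e310a5db738"

# The matching HMAC-SHA1 vector checked in shatest above.
assert hmac.new(key, msg, digestmod=hashlib.sha1).hexdigest() == \
    "effcdf6ae5eb2fa2d27416d5f184df9c259a7c79"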
Code-In-Action/python-in-action
flask/todo-api/flask/lib/python3.6/site-packages/setuptools/archive_util.py
95
6592
"""Utilities for extracting common archive formats""" import zipfile import tarfile import os import shutil import posixpath import contextlib from distutils.errors import DistutilsError from pkg_resources import ensure_directory __all__ = [ "unpack_archive", "unpack_zipfile", "unpack_tarfile", "default_filter", "UnrecognizedFormat", "extraction_drivers", "unpack_directory", ] class UnrecognizedFormat(DistutilsError): """Couldn't recognize the archive type""" def default_filter(src, dst): """The default progress/filter callback; returns True for all files""" return dst def unpack_archive(filename, extract_dir, progress_filter=default_filter, drivers=None): """Unpack `filename` to `extract_dir`, or raise ``UnrecognizedFormat`` `progress_filter` is a function taking two arguments: a source path internal to the archive ('/'-separated), and a filesystem path where it will be extracted. The callback must return the desired extract path (which may be the same as the one passed in), or else ``None`` to skip that file or directory. The callback can thus be used to report on the progress of the extraction, as well as to filter the items extracted or alter their extraction paths. `drivers`, if supplied, must be a non-empty sequence of functions with the same signature as this function (minus the `drivers` argument), that raise ``UnrecognizedFormat`` if they do not support extracting the designated archive type. The `drivers` are tried in sequence until one is found that does not raise an error, or until all are exhausted (in which case ``UnrecognizedFormat`` is raised). If you do not supply a sequence of drivers, the module's ``extraction_drivers`` constant will be used, which means that ``unpack_zipfile`` and ``unpack_tarfile`` will be tried, in that order. """ for driver in drivers or extraction_drivers: try: driver(filename, extract_dir, progress_filter) except UnrecognizedFormat: continue else: return else: raise UnrecognizedFormat( "Not a recognized archive type: %s" % filename ) def unpack_directory(filename, extract_dir, progress_filter=default_filter): """"Unpack" a directory, using the same interface as for archives Raises ``UnrecognizedFormat`` if `filename` is not a directory """ if not os.path.isdir(filename): raise UnrecognizedFormat("%s is not a directory" % filename) paths = { filename: ('', extract_dir), } for base, dirs, files in os.walk(filename): src, dst = paths[base] for d in dirs: paths[os.path.join(base, d)] = src + d + '/', os.path.join(dst, d) for f in files: target = os.path.join(dst, f) target = progress_filter(src + f, target) if not target: # skip non-files continue ensure_directory(target) f = os.path.join(base, f) shutil.copyfile(f, target) shutil.copystat(f, target) def unpack_zipfile(filename, extract_dir, progress_filter=default_filter): """Unpack zip `filename` to `extract_dir` Raises ``UnrecognizedFormat`` if `filename` is not a zipfile (as determined by ``zipfile.is_zipfile()``). See ``unpack_archive()`` for an explanation of the `progress_filter` argument. """ if not zipfile.is_zipfile(filename): raise UnrecognizedFormat("%s is not a zip file" % (filename,)) with zipfile.ZipFile(filename) as z: for info in z.infolist(): name = info.filename # don't extract absolute paths or ones with .. in them if name.startswith('/') or '..' 
in name.split('/'): continue target = os.path.join(extract_dir, *name.split('/')) target = progress_filter(name, target) if not target: continue if name.endswith('/'): # directory ensure_directory(target) else: # file ensure_directory(target) data = z.read(info.filename) with open(target, 'wb') as f: f.write(data) unix_attributes = info.external_attr >> 16 if unix_attributes: os.chmod(target, unix_attributes) def unpack_tarfile(filename, extract_dir, progress_filter=default_filter): """Unpack tar/tar.gz/tar.bz2 `filename` to `extract_dir` Raises ``UnrecognizedFormat`` if `filename` is not a tarfile (as determined by ``tarfile.open()``). See ``unpack_archive()`` for an explanation of the `progress_filter` argument. """ try: tarobj = tarfile.open(filename) except tarfile.TarError: raise UnrecognizedFormat( "%s is not a compressed or uncompressed tar file" % (filename,) ) with contextlib.closing(tarobj): # don't do any chowning! tarobj.chown = lambda *args: None for member in tarobj: name = member.name # don't extract absolute paths or ones with .. in them if not name.startswith('/') and '..' not in name.split('/'): prelim_dst = os.path.join(extract_dir, *name.split('/')) # resolve any links and to extract the link targets as normal # files while member is not None and (member.islnk() or member.issym()): linkpath = member.linkname if member.issym(): base = posixpath.dirname(member.name) linkpath = posixpath.join(base, linkpath) linkpath = posixpath.normpath(linkpath) member = tarobj._getmember(linkpath) if member is not None and (member.isfile() or member.isdir()): final_dst = progress_filter(name, prelim_dst) if final_dst: if final_dst.endswith(os.sep): final_dst = final_dst[:-1] try: # XXX Ugh tarobj._extract_member(member, final_dst) except tarfile.ExtractError: # chown/chmod/mkfifo/mknode/makedev failed pass return True extraction_drivers = unpack_directory, unpack_zipfile, unpack_tarfile
mit
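A short usage sketch of the progress_filter hook documented in unpack_archive() above. It is illustrative only: the archive name 'dist.zip', the destination directory, and the helper only_py_files are made up for the example, and it assumes setuptools is importable.

import os
from setuptools.archive_util import unpack_archive

def only_py_files(src, dst):
    # Keep directory entries and .py files; returning None tells
    # unpack_archive() to skip the item entirely (see the docstring above).
    if src.endswith('/') or src.endswith('.py'):
        return dst
    return None

# Placeholder paths, purely for illustration.
unpack_archive('dist.zip', os.path.join('build', 'unpacked'),
               progress_filter=only_py_files)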
Samsung/skia
third_party/externals/gyp/test/no-cpp/gyptest-no-cpp.py
7
1393
#!/usr/bin/env python

# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""
Checks that C-only targets aren't linked against libstdc++.
"""

import TestGyp

import re
import subprocess
import sys

# set |match| to ignore build stderr output.
test = TestGyp.TestGyp(match = lambda a, b: True)

if sys.platform != 'win32' and test.format not in ('make', 'android'):
  # TODO: This doesn't pass with make.
  # TODO: Does a test like this make sense with Windows? Android?

  CHDIR = 'src'
  test.run_gyp('test.gyp', chdir=CHDIR)
  test.build('test.gyp', 'no_cpp', chdir=CHDIR)

  def LinksLibStdCpp(path):
    path = test.built_file_path(path, chdir=CHDIR)
    if sys.platform == 'darwin':
      proc = subprocess.Popen(['otool', '-L', path], stdout=subprocess.PIPE)
    else:
      proc = subprocess.Popen(['ldd', path], stdout=subprocess.PIPE)
    output = proc.communicate()[0]
    assert not proc.returncode
    return 'libstdc++' in output or 'libc++' in output

  if LinksLibStdCpp('no_cpp'):
    test.fail_test()

  build_error_code = {
      'xcode': [1, 65],  # 1 for xcode 3, 65 for xcode 4 (see `man sysexits`)
      'make': 2,
      'ninja': 1,
  }[test.format]

  test.build('test.gyp', 'no_cpp_dep_on_cc_lib', chdir=CHDIR,
             status=build_error_code)

  test.pass_test()
bsd-3-clause
x684867/nemesis
src/node/tools/closure_linter/closure_linter/fixjsstyle.py
13
1338
#!/usr/bin/env python
#
# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Automatically fix simple style guide violations."""

__author__ = '[email protected] (Robert Walker)'

import sys

import gflags as flags

from closure_linter import checker
from closure_linter import error_fixer
from closure_linter.common import simplefileflags as fileflags


def main(argv = None):
  """Main function.

  Args:
    argv: Sequence of command line arguments.
  """
  if argv is None:
    argv = flags.FLAGS(sys.argv)

  files = fileflags.GetFileList(argv, 'JavaScript', ['.js'])

  style_checker = checker.JavaScriptStyleChecker(error_fixer.ErrorFixer())

  # Check the list of files.
  for filename in files:
    style_checker.Check(filename)


if __name__ == '__main__':
  main()
mit
alejob/mdanalysis
testsuite/MDAnalysisTests/topology/test_dlpoly.py
1
3036
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# MDAnalysis --- http://www.mdanalysis.org
# Copyright (c) 2006-2016 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
from numpy.testing import (
    assert_,
    assert_array_equal,
)

import MDAnalysis as mda

from MDAnalysisTests.topology.base import ParserBase
from MDAnalysisTests.datafiles import (
    DLP_CONFIG,
    DLP_CONFIG_order,
    DLP_CONFIG_minimal,
    DLP_HISTORY,
    DLP_HISTORY_order,
    DLP_HISTORY_minimal,
)


class DLPUniverse(ParserBase):
    def test_creates_universe(self):
        u = mda.Universe(self.filename, topology_format=self.format)
        assert_(isinstance(u, mda.Universe))


class DLPBase2(DLPUniverse):
    expected_attrs = ['ids', 'names']
    guessed_attrs = ['types', 'masses']
    expected_n_atoms = 216
    expected_n_residues = 1
    expected_n_segments = 1

    def test_names(self):
        assert_(self.top.names.values[0] == 'K+')
        assert_(self.top.names.values[4] == 'Cl-')


class TestDLPHistoryParser(DLPBase2):
    parser = mda.topology.DLPolyParser.HistoryParser
    filename = DLP_HISTORY
    format='HISTORY'


class TestDLPConfigParser(DLPBase2):
    parser = mda.topology.DLPolyParser.ConfigParser
    filename = DLP_CONFIG
    format='CONFIG'


class DLPBase(DLPUniverse):
    expected_attrs = ['ids', 'names']
    guessed_attrs = ['types', 'masses']
    expected_n_atoms = 3
    expected_n_residues = 1
    expected_n_segments = 1

    def test_dlp_names(self):
        assert_array_equal(self.top.names.values,
                           ['C', 'B', 'A'])


class TestDLPConfigMinimal(DLPBase):
    parser = mda.topology.DLPolyParser.ConfigParser
    filename = DLP_CONFIG_minimal
    format='CONFIG'


class TestDLPConfigOrder(DLPBase):
    parser = mda.topology.DLPolyParser.ConfigParser
    filename = DLP_CONFIG_order
    format='CONFIG'


class TestDLPHistoryMinimal(DLPBase):
    parser = mda.topology.DLPolyParser.HistoryParser
    filename = DLP_HISTORY_minimal
    format='HISTORY'


class TestDLPHistoryOrder(DLPBase):
    parser = mda.topology.DLPolyParser.HistoryParser
    filename = DLP_HISTORY_order
    format='HISTORY'
gpl-2.0
flt/FitFinder
test/lib/jinja2/meta.py
336
4198
# -*- coding: utf-8 -*- """ jinja2.meta ~~~~~~~~~~~ This module implements various functions that exposes information about templates that might be interesting for various kinds of applications. :copyright: (c) 2010 by the Jinja Team, see AUTHORS for more details. :license: BSD, see LICENSE for more details. """ from jinja2 import nodes from jinja2.compiler import CodeGenerator from jinja2._compat import string_types class TrackingCodeGenerator(CodeGenerator): """We abuse the code generator for introspection.""" def __init__(self, environment): CodeGenerator.__init__(self, environment, '<introspection>', '<introspection>') self.undeclared_identifiers = set() def write(self, x): """Don't write.""" def pull_locals(self, frame): """Remember all undeclared identifiers.""" self.undeclared_identifiers.update(frame.identifiers.undeclared) def find_undeclared_variables(ast): """Returns a set of all variables in the AST that will be looked up from the context at runtime. Because at compile time it's not known which variables will be used depending on the path the execution takes at runtime, all variables are returned. >>> from jinja2 import Environment, meta >>> env = Environment() >>> ast = env.parse('{% set foo = 42 %}{{ bar + foo }}') >>> meta.find_undeclared_variables(ast) == set(['bar']) True .. admonition:: Implementation Internally the code generator is used for finding undeclared variables. This is good to know because the code generator might raise a :exc:`TemplateAssertionError` during compilation and as a matter of fact this function can currently raise that exception as well. """ codegen = TrackingCodeGenerator(ast.environment) codegen.visit(ast) return codegen.undeclared_identifiers def find_referenced_templates(ast): """Finds all the referenced templates from the AST. This will return an iterator over all the hardcoded template extensions, inclusions and imports. If dynamic inheritance or inclusion is used, `None` will be yielded. >>> from jinja2 import Environment, meta >>> env = Environment() >>> ast = env.parse('{% extends "layout.html" %}{% include helper %}') >>> list(meta.find_referenced_templates(ast)) ['layout.html', None] This function is useful for dependency tracking. For example if you want to rebuild parts of the website after a layout template has changed. """ for node in ast.find_all((nodes.Extends, nodes.FromImport, nodes.Import, nodes.Include)): if not isinstance(node.template, nodes.Const): # a tuple with some non consts in there if isinstance(node.template, (nodes.Tuple, nodes.List)): for template_name in node.template.items: # something const, only yield the strings and ignore # non-string consts that really just make no sense if isinstance(template_name, nodes.Const): if isinstance(template_name.value, string_types): yield template_name.value # something dynamic in there else: yield None # something dynamic we don't know about here else: yield None continue # constant is a basestring, direct template name if isinstance(node.template.value, string_types): yield node.template.value # a tuple or list (latter *should* not happen) made of consts, # yield the consts that are strings. We could warn here for # non string values elif isinstance(node, nodes.Include) and \ isinstance(node.template.value, (tuple, list)): for template_name in node.template.value: if isinstance(template_name, string_types): yield template_name # something else we don't care about, we could warn here else: yield None
apache-2.0
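The jinja2.meta record above documents two introspection helpers, `find_undeclared_variables` and `find_referenced_templates`. A small usage sketch mirroring the doctests in that file follows; the template source is illustrative only.

```python
# Usage sketch for the two helpers documented above; the template source is
# illustrative and the printed results are what the docstrings describe.
from jinja2 import Environment, meta

env = Environment()
ast = env.parse('{% extends "layout.html" %}{{ user.name }} {{ missing }}')

print(meta.find_undeclared_variables(ast))        # e.g. {'user', 'missing'}
print(list(meta.find_referenced_templates(ast)))  # e.g. ['layout.html']
```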
V11/volcano
server/sqlmap/lib/parse/banner.py
6
3551
#!/usr/bin/env python """ Copyright (c) 2006-2015 sqlmap developers (http://sqlmap.org/) See the file 'doc/COPYING' for copying permission """ import re from xml.sax.handler import ContentHandler from lib.core.common import Backend from lib.core.common import parseXmlFile from lib.core.common import sanitizeStr from lib.core.data import kb from lib.core.data import paths from lib.core.enums import DBMS from lib.parse.handler import FingerprintHandler class MSSQLBannerHandler(ContentHandler): """ This class defines methods to parse and extract information from the given Microsoft SQL Server banner based upon the data in XML file """ def __init__(self, banner, info): ContentHandler.__init__(self) self._banner = sanitizeStr(banner) self._inVersion = False self._inServicePack = False self._release = None self._version = "" self._versionAlt = None self._servicePack = "" self._info = info def _feedInfo(self, key, value): value = sanitizeStr(value) if value in (None, "None"): return self._info[key] = value def startElement(self, name, attrs): if name == "signatures": self._release = sanitizeStr(attrs.get("release")) elif name == "version": self._inVersion = True elif name == "servicepack": self._inServicePack = True def characters(self, data): if self._inVersion: self._version += sanitizeStr(data) elif self._inServicePack: self._servicePack += sanitizeStr(data) def endElement(self, name): if name == "signature": for version in (self._version, self._versionAlt): if version and re.search(r" %s[\.\ ]+" % re.escape(version), self._banner): self._feedInfo("dbmsRelease", self._release) self._feedInfo("dbmsVersion", self._version) self._feedInfo("dbmsServicePack", self._servicePack) break self._version = "" self._versionAlt = None self._servicePack = "" elif name == "version": self._inVersion = False self._version = self._version.replace(" ", "") match = re.search(r"\A(?P<major>\d+)\.00\.(?P<build>\d+)\Z", self._version) self._versionAlt = "%s.0.%s.0" % (match.group('major'), match.group('build')) if match else None elif name == "servicepack": self._inServicePack = False self._servicePack = self._servicePack.replace(" ", "") def bannerParser(banner): """ This function calls a class to extract information from the given DBMS banner based upon the data in XML file """ xmlfile = None if Backend.isDbms(DBMS.MSSQL): xmlfile = paths.MSSQL_XML elif Backend.isDbms(DBMS.MYSQL): xmlfile = paths.MYSQL_XML elif Backend.isDbms(DBMS.ORACLE): xmlfile = paths.ORACLE_XML elif Backend.isDbms(DBMS.PGSQL): xmlfile = paths.PGSQL_XML if not xmlfile: return if Backend.isDbms(DBMS.MSSQL): handler = MSSQLBannerHandler(banner, kb.bannerFp) parseXmlFile(xmlfile, handler) handler = FingerprintHandler(banner, kb.bannerFp) parseXmlFile(paths.GENERIC_XML, handler) else: handler = FingerprintHandler(banner, kb.bannerFp) parseXmlFile(xmlfile, handler) parseXmlFile(paths.GENERIC_XML, handler)
mit
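The sqlmap banner-parsing record above is built around SAX `ContentHandler` subclasses. Below is a self-contained sketch of that handler pattern using a hypothetical XML snippet; it only mirrors the element names `MSSQLBannerHandler` reacts to, while sqlmap's real signature files are larger and live under `paths.MSSQL_XML`.

```python
# Self-contained sketch of the SAX handler pattern used above. The XML snippet
# is hypothetical; it only reuses the <signatures>/<signature>/<version> layout.
import xml.sax
from xml.sax.handler import ContentHandler

class VersionHandler(ContentHandler):
    def __init__(self):
        ContentHandler.__init__(self)
        self._in_version = False
        self._buf = ""
        self.versions = []

    def startElement(self, name, attrs):
        if name == "version":
            self._in_version = True
            self._buf = ""

    def characters(self, data):
        if self._in_version:
            self._buf += data          # characters() may arrive in chunks

    def endElement(self, name):
        if name == "version":
            self._in_version = False
            self.versions.append(self._buf.strip())

handler = VersionHandler()
xml.sax.parseString(
    b"<signatures><signature><version>9.00.1399</version></signature></signatures>",
    handler)
print(handler.versions)  # ['9.00.1399']
```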
liyu1990/tensorflow
tensorflow/python/ops/parsing_ops.py
3
14601
"""Parsing Ops.""" from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import tensor_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import common_shapes from tensorflow.python.ops import constant_op from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import gen_parsing_ops from tensorflow.python.ops import logging_ops from tensorflow.python.ops import math_ops # pylint: disable=wildcard-import,undefined-variable from tensorflow.python.ops.gen_parsing_ops import * ops.NoGradient("DecodeRaw") ops.NoGradient("StringToNumber") # pylint: disable=protected-access def parse_example(serialized, names=None, sparse_keys=None, sparse_types=None, dense_keys=None, dense_types=None, dense_defaults=None, dense_shapes=None, name="ParseExample"): """Parses `Example` protos. Parses a number of serialized [`Example`] (https://tensorflow.googlesource.com/tensorflow/+/master/tensorflow/core/example/example.proto) protos given in `serialized`. `names` may contain descriptive names for the corresponding serialized protos. These may be useful for debugging purposes, but they have no effect on the output. If not `None`, `names` must be the same length as `serialized`. This op parses serialized examples into a dictionary mapping keys to `Tensor` and `SparseTensor` objects respectively, depending on whether the keys appear in `sparse_keys` or `dense_keys`. The key `dense_keys[j]` is mapped to a `Tensor` of type `dense_types[j]` and of shape `(serialized.size(),) + dense_shapes[j]`. `dense_defaults` provides defaults for values referenced using `dense_keys`. If a key is not present in this dictionary, the corresponding dense `Feature` is required in all elements of `serialized`. `dense_shapes[j]` provides the shape of each `Feature` entry referenced by `dense_keys[j]`. The number of elements in the `Feature` corresponding to `dense_key[j]` must always have `np.prod(dense_shapes[j])` entries. The returned `Tensor` for `dense_key[j]` has shape `[N] + dense_shape[j]`, where `N` is the number of `Example`s in `serialized`. The key `sparse_keys[j]` is mapped to a `SparseTensor` of type `sparse_types[j]`. The `SparseTensor` represents a ragged matrix. Its indices are `[batch, index]` where `batch` is the batch entry the value is from, and `index` is the value's index in the list of values associated with that feature and example. 
Examples: For example, if one expects a `tf.float32` sparse feature `ft` and three serialized `Example`s are provided: ``` serialized = [ features: { feature: [ key: { "ft" value: float_list: { value: [1.0, 2.0] } } ] }, features: { feature: [] }, features: { feature: [ key: { "ft" value: float_list: { value: [3.0] } } ] } ] ``` then the output will look like: ``` {"ft": SparseTensor(indices=[[0, 0], [0, 1], [2, 0]], values=[1.0, 2.0, 3.0], shape=(3, 2)) } ``` Given two `Example` input protos in `serialized`: ``` [ features: { feature: { key: "kw" value: { bytes_list: { value: [ "knit", "big" ] } } } feature: { key: "gps" value: { float_list: { value: [] } } } }, features: { feature: { key: "kw" value: { bytes_list: { value: [ "emmy" ] } } } feature: { key: "dank" value: { int64_list: { value: [ 42 ] } } } feature: { key: "gps" value: { } } } ] ``` And arguments ``` names: ["input0", "input1"], sparse_keys: ["kw", "dank", "gps"] sparse_types: [DT_STRING, DT_INT64, DT_FLOAT] ``` Then the output is a dictionary: ```python { "kw": SparseTensor( indices=[[0, 0], [0, 1], [1, 0]], values=["knit", "big", "emmy"] shape=[2, 2]), "dank": SparseTensor( indices=[[1, 0]], values=[42], shape=[2, 1]), "gps": SparseTensor( indices=[], values=[], shape=[2, 0]), } ``` For dense results in two serialized `Example`s: ``` [ features: { feature: { key: "age" value: { int64_list: { value: [ 0 ] } } } feature: { key: "gender" value: { bytes_list: { value: [ "f" ] } } } }, features: { feature: { key: "age" value: { int64_list: { value: [] } } } feature: { key: "gender" value: { bytes_list: { value: [ "f" ] } } } } ] ``` We can use arguments: ``` names: ["input0", "input1"], dense_keys: np.array(["age", "gender"]), dense_types: [tf.int64, tf.string], dense_defaults: { "age": -1 # "age" defaults to -1 if missing # "gender" has no specified default so it's required } dense_shapes: [(1,), (1,)], # age, gender, label, weight ``` And the expected output is: ```python { "age": [[0], [-1]], "gender": [["f"], ["f"]], } ``` Args: serialized: A list of strings, a batch of binary serialized `Example` protos. names: A list of strings, the names of the serialized protos. sparse_keys: A list of string keys in the examples' features. The results for these keys will be returned as `SparseTensor` objects. sparse_types: A list of `DTypes` of the same length as `sparse_keys`. Only `tf.float32` (`FloatList`), `tf.int64` (`Int64List`), and `tf.string` (`BytesList`) are supported. dense_keys: A list of string keys in the examples' features. The results for these keys will be returned as `Tensor`s dense_types: A list of DTypes of the same length as `dense_keys`. Only `tf.float32` (`FloatList`), `tf.int64` (`Int64List`), and `tf.string` (`BytesList`) are supported. dense_defaults: A dict mapping string keys to `Tensor`s. The keys of the dict must match the dense_keys of the feature. dense_shapes: A list of tuples with the same length as `dense_keys`. The shape of the data for each dense feature referenced by `dense_keys`. name: A name for this operation (optional). Returns: A `dict` mapping keys to `Tensor`s and `SparseTensor`s. Raises: ValueError: If sparse and dense key sets intersect, or input lengths do not match up. 
""" names = [] if names is None else names dense_defaults = {} if dense_defaults is None else dense_defaults sparse_keys = [] if sparse_keys is None else sparse_keys sparse_types = [] if sparse_types is None else sparse_types dense_keys = [] if dense_keys is None else dense_keys dense_types = [] if dense_types is None else dense_types dense_shapes = [ []] * len(dense_keys) if dense_shapes is None else dense_shapes num_dense = len(dense_keys) num_sparse = len(sparse_keys) if len(dense_shapes) != num_dense: raise ValueError("len(dense_shapes) != len(dense_keys): %d vs. %d" % (len(dense_shapes), num_dense)) if len(dense_types) != num_dense: raise ValueError("len(dense_types) != len(num_dense): %d vs. %d" % (len(dense_types), num_dense)) if len(sparse_types) != num_sparse: raise ValueError("len(sparse_types) != len(sparse_keys): %d vs. %d" % (len(sparse_types), num_sparse)) if num_dense + num_sparse == 0: raise ValueError("Must provide at least one sparse key or dense key") if not set(dense_keys).isdisjoint(set(sparse_keys)): raise ValueError( "Dense and sparse keys must not intersect; intersection: %s" % set(dense_keys).intersection(set(sparse_keys))) dense_defaults_vec = [] for i, key in enumerate(dense_keys): default_value = dense_defaults.get(key) if default_value is None: default_value = constant_op.constant([], dtype=dense_types[i]) elif not isinstance(default_value, ops.Tensor): default_value = ops.convert_to_tensor( default_value, dtype=dense_types[i], name=key) default_value = array_ops.reshape(default_value, dense_shapes[i]) dense_defaults_vec.append(default_value) dense_shapes = [tensor_util.MakeTensorShapeProto(shape) if isinstance(shape, (list, tuple)) else shape for shape in dense_shapes] outputs = gen_parsing_ops._parse_example( serialized=serialized, names=names, dense_defaults=dense_defaults_vec, sparse_keys=sparse_keys, sparse_types=sparse_types, dense_keys=dense_keys, dense_shapes=dense_shapes, name=name) (sparse_indices, sparse_values, sparse_shapes, dense_values) = outputs sparse_tensors = [ops.SparseTensor(ix, val, shape) for (ix, val, shape) in zip(sparse_indices, sparse_values, sparse_shapes)] return dict( zip(sparse_keys + dense_keys, sparse_tensors + dense_values)) def parse_single_example(serialized, # pylint: disable=invalid-name names=None, sparse_keys=None, sparse_types=None, dense_keys=None, dense_types=None, dense_defaults=None, dense_shapes=None, name="ParseSingleExample"): """Parses a single `Example` proto. Similar to `parse_example`, except: For dense tensors, the returned `Tensor` is identical to the output of `parse_example`, except there is no batch dimension, the output shape is the same as the shape given in `dense_shape`. For `SparseTensor`s, the first (batch) column of the indices matrix is removed (the indices matrix is a column vector), the values vector is unchanged, and the first (batch_size) entry of the shape vector is removed (it is now a single element vector). See also `parse_example`. Args: serialized: A scalar string, a single serialized Example. See parse_example documentation for more details. names: (Optional) A scalar string, the associated name. See parse_example documentation for more details. sparse_keys: See parse_example documentation for more details. sparse_types: See parse_example documentation for more details. dense_keys: See parse_example documentation for more details. dense_types: See parse_example documentation for more details. dense_defaults: See parse_example documentation for more details. 
dense_shapes: See parse_example documentation for more details. name: A name for this operation (optional). Returns: A dictionary mapping keys to Tensors and SparseTensors. Raises: ValueError: if "scalar" or "names" have known shapes, and are not scalars. """ with ops.op_scope([serialized], name, "parse_single_example"): serialized = ops.convert_to_tensor(serialized) serialized_shape = serialized.get_shape() if serialized_shape.ndims is not None: if serialized_shape.ndims != 0: raise ValueError("Input serialized must be a scalar") else: serialized = control_flow_ops.with_dependencies( [logging_ops.Assert( math_ops.equal(array_ops.rank(serialized), 0), ["Input serialized must be a scalar"], name="SerializedIsScalar")], serialized, name="SerializedDependencies") serialized = array_ops.expand_dims(serialized, 0) if names is not None: names = ops.convert_to_tensor(names) names_shape = names.get_shape() if names_shape.ndims is not None: if names_shape.ndims != 0: raise ValueError("Input names must be a scalar") else: names = control_flow_ops.with_dependencies( [logging_ops.Assert( math_ops.equal(array_ops.rank(names), 0), ["Input names must be a scalar"], name="NamesIsScalar")], names, name="NamesDependencies") names = array_ops.expand_dims(names, 0) outputs = parse_example(serialized, names=names, sparse_keys=sparse_keys, sparse_types=sparse_types, dense_keys=dense_keys, dense_types=dense_types, dense_defaults=dense_defaults, dense_shapes=dense_shapes, name=name) if dense_keys is not None: for d in dense_keys: outputs[d] = array_ops.squeeze(outputs[d], [0], name="Squeeze_%s" % d) if sparse_keys is not None: for s in sparse_keys: outputs[s] = ops.SparseTensor( array_ops.slice(outputs[s].indices, [0, 1], [-1, -1], name="Slice_Indices_%s" % s), outputs[s].values, array_ops.slice(outputs[s].shape, [1], [-1], name="Squeeze_Shape_%s" % s)) return outputs @ops.RegisterShape("ParseExample") def _ParseExampleShape(op): """Shape function for the ParseExample op.""" input_shape = op.inputs[0].get_shape().with_rank(1) num_sparse = op.get_attr("Nsparse") num_dense = op.get_attr("Ndense") dense_shapes = op.get_attr("dense_shapes") sparse_index_shapes = [ tensor_shape.matrix(None, 2) for _ in range(num_sparse)] sparse_value_shapes = [tensor_shape.vector(None) for _ in range(num_sparse)] sparse_shape_shapes = [tensor_shape.vector(2) for _ in range(num_sparse)] assert num_dense == len(dense_shapes) dense_shapes = [ input_shape.concatenate((d.size for d in dense_shape.dim)) for dense_shape in dense_shapes] return (sparse_index_shapes + sparse_value_shapes + sparse_shape_shapes + dense_shapes) ops.RegisterShape("StringToNumber")( common_shapes.unchanged_shape) @ops.RegisterShape("DecodeRaw") def _DecodeRawShape(op): """Shape function for the DecodeRaw op.""" # NOTE(mrry): Last dimension is data-dependent. return [op.inputs[0].get_shape().concatenate([None])] @ops.RegisterShape("DecodeCSV") def _DecodeCSVShape(op): """Shape function for the DecodeCSV op.""" input_shape = op.inputs[0].get_shape() # Optionally check that all of other inputs are scalar or empty. for default_input in op.inputs[1:]: default_input_shape = default_input.get_shape().with_rank(1) if default_input_shape[0] > 1: raise ValueError( "Shape of a default must be a length-0 or length-1 vector.") return [input_shape] * len(op.outputs)
apache-2.0
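The TensorFlow record above defines the legacy `parse_example` keyword signature (`sparse_keys`/`dense_keys`/`dense_defaults`/`dense_shapes`). The sketch below assumes that legacy graph-mode API exactly as shown in the docstring; newer TensorFlow releases replaced it with a `features=` dictionary, so treat this as illustrative rather than tested against any particular release. The feature names and values are made up.

```python
# Illustrative sketch of the legacy parse_example signature defined above.
# Assumes old graph-mode TensorFlow; feature names and values are made up.
import tensorflow as tf

examples = [
    tf.train.Example(features=tf.train.Features(feature={
        "age": tf.train.Feature(int64_list=tf.train.Int64List(value=[39])),
        "gender": tf.train.Feature(bytes_list=tf.train.BytesList(value=[b"f"])),
    })).SerializeToString(),
    tf.train.Example(features=tf.train.Features(feature={
        "gender": tf.train.Feature(bytes_list=tf.train.BytesList(value=[b"m"])),
    })).SerializeToString(),
]

features = tf.parse_example(
    tf.constant(examples),
    dense_keys=["age", "gender"],
    dense_types=[tf.int64, tf.string],
    dense_defaults={"age": -1},     # "age" falls back to -1 when missing
    dense_shapes=[(1,), (1,)])
# Once evaluated in a session: features["age"] -> [[39], [-1]],
#                              features["gender"] -> [["f"], ["m"]]
```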
deployed/django
django/db/models/fields/related.py
3
97755
from operator import attrgetter from django.apps import apps from django.core import checks from django.db import connection, connections, router, transaction from django.db.backends import utils from django.db.models import signals, Q from django.db.models.deletion import SET_NULL, SET_DEFAULT, CASCADE from django.db.models.fields import (AutoField, Field, IntegerField, PositiveIntegerField, PositiveSmallIntegerField, FieldDoesNotExist) from django.db.models.lookups import IsNull from django.db.models.related import RelatedObject, PathInfo from django.db.models.query import QuerySet from django.db.models.sql.datastructures import Col from django.utils.encoding import smart_text from django.utils import six from django.utils.deprecation import RenameMethodsBase from django.utils.translation import ugettext_lazy as _ from django.utils.functional import curry, cached_property from django.core import exceptions from django import forms RECURSIVE_RELATIONSHIP_CONSTANT = 'self' def add_lazy_relation(cls, field, relation, operation): """ Adds a lookup on ``cls`` when a related field is defined using a string, i.e.:: class MyModel(Model): fk = ForeignKey("AnotherModel") This string can be: * RECURSIVE_RELATIONSHIP_CONSTANT (i.e. "self") to indicate a recursive relation. * The name of a model (i.e "AnotherModel") to indicate another model in the same app. * An app-label and model name (i.e. "someapp.AnotherModel") to indicate another model in a different app. If the other model hasn't yet been loaded -- almost a given if you're using lazy relationships -- then the relation won't be set up until the class_prepared signal fires at the end of model initialization. operation is the work that must be performed once the relation can be resolved. """ # Check for recursive relations if relation == RECURSIVE_RELATIONSHIP_CONSTANT: app_label = cls._meta.app_label model_name = cls.__name__ else: # Look for an "app.Model" relation if isinstance(relation, six.string_types): try: app_label, model_name = relation.split(".") except ValueError: # If we can't split, assume a model in current app app_label = cls._meta.app_label model_name = relation else: # it's actually a model class app_label = relation._meta.app_label model_name = relation._meta.object_name # Try to look up the related model, and if it's already loaded resolve the # string right away. If get_model returns None, it means that the related # model isn't loaded yet, so we need to pend the relation until the class # is prepared. try: model = cls._meta.apps.get_registered_model(app_label, model_name) except LookupError: key = (app_label, model_name) value = (cls, field, operation) cls._meta.apps._pending_lookups.setdefault(key, []).append(value) else: operation(field, model, cls) def do_pending_lookups(sender, **kwargs): """ Handle any pending relations to the sending model. Sent from class_prepared. 
""" key = (sender._meta.app_label, sender.__name__) for cls, field, operation in sender._meta.apps._pending_lookups.pop(key, []): operation(field, sender, cls) signals.class_prepared.connect(do_pending_lookups) class RelatedField(Field): def check(self, **kwargs): errors = super(RelatedField, self).check(**kwargs) errors.extend(self._check_relation_model_exists()) errors.extend(self._check_referencing_to_swapped_model()) errors.extend(self._check_clashes()) return errors def _check_relation_model_exists(self): rel_is_missing = self.rel.to not in apps.get_models() rel_is_string = isinstance(self.rel.to, six.string_types) model_name = self.rel.to if rel_is_string else self.rel.to._meta.object_name if rel_is_missing and (rel_is_string or not self.rel.to._meta.swapped): return [ checks.Error( ('The field has a relation with model %s, which ' 'has either not been installed or is abstract.') % model_name, hint=('Ensure that you did not misspell the model name and ' 'the model is not abstract. Does your INSTALLED_APPS ' 'setting contain the app where %s is defined?') % model_name, obj=self, id='E030', ) ] return [] def _check_referencing_to_swapped_model(self): if (self.rel.to not in apps.get_models() and not isinstance(self.rel.to, six.string_types) and self.rel.to._meta.swapped): model = "%s.%s" % ( self.rel.to._meta.app_label, self.rel.to._meta.object_name ) return [ checks.Error( ('The field defines a relation with the model %s, ' 'which has been swapped out.') % model, hint='Update the relation to point at settings.%s' % self.rel.to._meta.swappable, obj=self, id='E029', ) ] return [] def _check_clashes(self): """ Check accessor and reverse query name clashes. """ from django.db.models.base import ModelBase errors = [] opts = self.model._meta # `f.rel.to` may be a string instead of a model. Skip if model name is # not resolved. if not isinstance(self.rel.to, ModelBase): return [] # If the field doesn't install backward relation on the target model (so # `is_hidden` returns True), then there are no clashes to check and we # can skip these fields. if self.rel.is_hidden(): return [] try: self.related except AttributeError: return [] # Consider that we are checking field `Model.foreign` and the models # are: # # class Target(models.Model): # model = models.IntegerField() # model_set = models.IntegerField() # # class Model(models.Model): # foreign = models.ForeignKey(Target) # m2m = models.ManyToManyField(Target) rel_opts = self.rel.to._meta # rel_opts.object_name == "Target" rel_name = self.related.get_accessor_name() # i. e. "model_set" rel_query_name = self.related_query_name() # i. e. "model" field_name = "%s.%s" % (opts.object_name, self.name) # i. e. "Model.field" # Check clashes between accessor or reverse query name of `field` # and any other field name -- i. e. accessor for Model.foreign is # model_set and it clashes with Target.model_set. potential_clashes = rel_opts.fields + rel_opts.many_to_many for clash_field in potential_clashes: clash_name = "%s.%s" % (rel_opts.object_name, clash_field.name) # i. e. "Target.model_set" if clash_field.name == rel_name: errors.append( checks.Error( 'Accessor for field %s clashes with field %s.' % (field_name, clash_name), hint=('Rename field %s or add/change a related_name ' 'argument to the definition for field %s.') % (clash_name, field_name), obj=self, id='E014', ) ) if clash_field.name == rel_query_name: errors.append( checks.Error( 'Reverse query name for field %s clashes with field %s.' 
% (field_name, clash_name), hint=('Rename field %s or add/change a related_name ' 'argument to the definition for field %s.') % (clash_name, field_name), obj=self, id='E015', ) ) # Check clashes between accessors/reverse query names of `field` and # any other field accessor -- i. e. Model.foreign accessor clashes with # Model.m2m accessor. potential_clashes = rel_opts.get_all_related_many_to_many_objects() potential_clashes += rel_opts.get_all_related_objects() potential_clashes = (r for r in potential_clashes if r.field is not self) for clash_field in potential_clashes: clash_name = "%s.%s" % ( # i. e. "Model.m2m" clash_field.model._meta.object_name, clash_field.field.name) if clash_field.get_accessor_name() == rel_name: errors.append( checks.Error( 'Clash between accessors for %s and %s.' % (field_name, clash_name), hint=('Add or change a related_name argument ' 'to the definition for %s or %s.') % (field_name, clash_name), obj=self, id='E016', ) ) if clash_field.get_accessor_name() == rel_query_name: errors.append( checks.Error( 'Clash between reverse query names for %s and %s.' % (field_name, clash_name), hint=('Add or change a related_name argument ' 'to the definition for %s or %s.') % (field_name, clash_name), obj=self, id='E017', ) ) return errors def db_type(self, connection): '''By default related field will not have a column as it relates columns to another table''' return None def contribute_to_class(self, cls, name, virtual_only=False): sup = super(RelatedField, self) # Store the opts for related_query_name() self.opts = cls._meta if hasattr(sup, 'contribute_to_class'): sup.contribute_to_class(cls, name, virtual_only=virtual_only) if not cls._meta.abstract and self.rel.related_name: related_name = self.rel.related_name % { 'class': cls.__name__.lower(), 'app_label': cls._meta.app_label.lower() } self.rel.related_name = related_name other = self.rel.to if isinstance(other, six.string_types) or other._meta.pk is None: def resolve_related_class(field, model, cls): field.rel.to = model field.do_related_class(model, cls) add_lazy_relation(cls, self, other, resolve_related_class) else: self.do_related_class(other, cls) @property def swappable_setting(self): """ Gets the setting that this is powered from for swapping, or None if it's not swapped in / marked with swappable=False. """ if self.swappable: # Work out string form of "to" if isinstance(self.rel.to, six.string_types): to_string = self.rel.to else: to_string = "%s.%s" % ( self.rel.to._meta.app_label, self.rel.to._meta.object_name, ) # See if anything swapped/swappable matches for model in apps.get_models(include_swapped=True): if model._meta.swapped: if model._meta.swapped == to_string: return model._meta.swappable if ("%s.%s" % (model._meta.app_label, model._meta.object_name)) == to_string and model._meta.swappable: return model._meta.swappable return None def set_attributes_from_rel(self): self.name = self.name or (self.rel.to._meta.model_name + '_' + self.rel.to._meta.pk.name) if self.verbose_name is None: self.verbose_name = self.rel.to._meta.verbose_name self.rel.set_field_name() def do_related_class(self, other, cls): self.set_attributes_from_rel() self.related = RelatedObject(other, cls, self) if not cls._meta.abstract: self.contribute_to_related_class(other, self.related) def get_limit_choices_to(self): """Returns 'limit_choices_to' for this model field. If it is a callable, it will be invoked and the result will be returned. 
""" if callable(self.rel.limit_choices_to): return self.rel.limit_choices_to() return self.rel.limit_choices_to def formfield(self, **kwargs): """Passes ``limit_choices_to`` to field being constructed. Only passes it if there is a type that supports related fields. This is a similar strategy used to pass the ``queryset`` to the field being constructed. """ defaults = {} if hasattr(self.rel, 'get_related_field'): # If this is a callable, do not invoke it here. Just pass # it in the defaults for when the form class will later be # instantiated. limit_choices_to = self.rel.limit_choices_to defaults.update({ 'limit_choices_to': limit_choices_to, }) defaults.update(kwargs) return super(RelatedField, self).formfield(**defaults) def related_query_name(self): # This method defines the name that can be used to identify this # related object in a table-spanning query. It uses the lower-cased # object_name by default, but this can be overridden with the # "related_name" option. return self.rel.related_query_name or self.rel.related_name or self.opts.model_name class RenameRelatedObjectDescriptorMethods(RenameMethodsBase): renamed_methods = ( ('get_query_set', 'get_queryset', DeprecationWarning), ('get_prefetch_query_set', 'get_prefetch_queryset', DeprecationWarning), ) class SingleRelatedObjectDescriptor(six.with_metaclass(RenameRelatedObjectDescriptorMethods)): # This class provides the functionality that makes the related-object # managers available as attributes on a model class, for fields that have # a single "remote" value, on the class pointed to by a related field. # In the example "place.restaurant", the restaurant attribute is a # SingleRelatedObjectDescriptor instance. def __init__(self, related): self.related = related self.cache_name = related.get_cache_name() @cached_property def RelatedObjectDoesNotExist(self): # The exception isn't created at initialization time for the sake of # consistency with `ReverseSingleRelatedObjectDescriptor`. return type( str('RelatedObjectDoesNotExist'), (self.related.model.DoesNotExist, AttributeError), {} ) def is_cached(self, instance): return hasattr(instance, self.cache_name) def get_queryset(self, **hints): return self.related.model._base_manager.db_manager(hints=hints) def get_prefetch_queryset(self, instances, queryset=None): if queryset is not None: raise ValueError("Custom queryset can't be used for this lookup.") rel_obj_attr = attrgetter(self.related.field.attname) instance_attr = lambda obj: obj._get_pk_val() instances_dict = dict((instance_attr(inst), inst) for inst in instances) query = {'%s__in' % self.related.field.name: instances} qs = self.get_queryset(instance=instances[0]).filter(**query) # Since we're going to assign directly in the cache, # we must manage the reverse relation cache manually. 
rel_obj_cache_name = self.related.field.get_cache_name() for rel_obj in qs: instance = instances_dict[rel_obj_attr(rel_obj)] setattr(rel_obj, rel_obj_cache_name, instance) return qs, rel_obj_attr, instance_attr, True, self.cache_name def __get__(self, instance, instance_type=None): if instance is None: return self try: rel_obj = getattr(instance, self.cache_name) except AttributeError: related_pk = instance._get_pk_val() if related_pk is None: rel_obj = None else: params = {} for lh_field, rh_field in self.related.field.related_fields: params['%s__%s' % (self.related.field.name, rh_field.name)] = getattr(instance, rh_field.attname) try: rel_obj = self.get_queryset(instance=instance).get(**params) except self.related.model.DoesNotExist: rel_obj = None else: setattr(rel_obj, self.related.field.get_cache_name(), instance) setattr(instance, self.cache_name, rel_obj) if rel_obj is None: raise self.RelatedObjectDoesNotExist( "%s has no %s." % ( instance.__class__.__name__, self.related.get_accessor_name() ) ) else: return rel_obj def __set__(self, instance, value): # The similarity of the code below to the code in # ReverseSingleRelatedObjectDescriptor is annoying, but there's a bunch # of small differences that would make a common base class convoluted. # If null=True, we can assign null here, but otherwise the value needs # to be an instance of the related class. if value is None and self.related.field.null is False: raise ValueError( 'Cannot assign None: "%s.%s" does not allow null values.' % ( instance._meta.object_name, self.related.get_accessor_name(), ) ) elif value is not None and not isinstance(value, self.related.model): raise ValueError( 'Cannot assign "%r": "%s.%s" must be a "%s" instance.' % ( value, instance._meta.object_name, self.related.get_accessor_name(), self.related.opts.object_name, ) ) elif value is not None: if instance._state.db is None: instance._state.db = router.db_for_write(instance.__class__, instance=value) elif value._state.db is None: value._state.db = router.db_for_write(value.__class__, instance=instance) elif value._state.db is not None and instance._state.db is not None: if not router.allow_relation(value, instance): raise ValueError('Cannot assign "%r": the current database router prevents this relation.' % value) related_pk = tuple(getattr(instance, field.attname) for field in self.related.field.foreign_related_fields) if None in related_pk: raise ValueError( 'Cannot assign "%r": "%s" instance isn\'t saved in the database.' % (value, instance._meta.object_name) ) # Set the value of the related field to the value of the related object's related field for index, field in enumerate(self.related.field.local_related_fields): setattr(value, field.attname, related_pk[index]) # Since we already know what the related object is, seed the related # object caches now, too. This avoids another db hit if you get the # object you just set. setattr(instance, self.cache_name, value) setattr(value, self.related.field.get_cache_name(), instance) class ReverseSingleRelatedObjectDescriptor(six.with_metaclass(RenameRelatedObjectDescriptorMethods)): # This class provides the functionality that makes the related-object # managers available as attributes on a model class, for fields that have # a single "remote" value, on the class that defines the related field. # In the example "choice.poll", the poll attribute is a # ReverseSingleRelatedObjectDescriptor instance. 
def __init__(self, field_with_rel): self.field = field_with_rel self.cache_name = self.field.get_cache_name() @cached_property def RelatedObjectDoesNotExist(self): # The exception can't be created at initialization time since the # related model might not be resolved yet; `rel.to` might still be # a string model reference. return type( str('RelatedObjectDoesNotExist'), (self.field.rel.to.DoesNotExist, AttributeError), {} ) def is_cached(self, instance): return hasattr(instance, self.cache_name) def get_queryset(self, **hints): rel_mgr = self.field.rel.to._default_manager.db_manager(hints=hints) # If the related manager indicates that it should be used for # related fields, respect that. if getattr(rel_mgr, 'use_for_related_fields', False): return rel_mgr else: return QuerySet(self.field.rel.to, hints=hints) def get_prefetch_queryset(self, instances, queryset=None): if queryset is not None: raise ValueError("Custom queryset can't be used for this lookup.") rel_obj_attr = self.field.get_foreign_related_value instance_attr = self.field.get_local_related_value instances_dict = dict((instance_attr(inst), inst) for inst in instances) related_field = self.field.foreign_related_fields[0] # FIXME: This will need to be revisited when we introduce support for # composite fields. In the meantime we take this practical approach to # solve a regression on 1.6 when the reverse manager in hidden # (related_name ends with a '+'). Refs #21410. if self.field.rel.is_hidden(): query = {'%s__in' % related_field.name: set(instance_attr(inst)[0] for inst in instances)} else: query = {'%s__in' % self.field.related_query_name(): instances} qs = self.get_queryset(instance=instances[0]).filter(**query) # Since we're going to assign directly in the cache, # we must manage the reverse relation cache manually. if not self.field.rel.multiple: rel_obj_cache_name = self.field.related.get_cache_name() for rel_obj in qs: instance = instances_dict[rel_obj_attr(rel_obj)] setattr(rel_obj, rel_obj_cache_name, instance) return qs, rel_obj_attr, instance_attr, True, self.cache_name def __get__(self, instance, instance_type=None): if instance is None: return self try: rel_obj = getattr(instance, self.cache_name) except AttributeError: val = self.field.get_local_related_value(instance) if None in val: rel_obj = None else: params = dict( (rh_field.attname, getattr(instance, lh_field.attname)) for lh_field, rh_field in self.field.related_fields) qs = self.get_queryset(instance=instance) extra_filter = self.field.get_extra_descriptor_filter(instance) if isinstance(extra_filter, dict): params.update(extra_filter) qs = qs.filter(**params) else: qs = qs.filter(extra_filter, **params) # Assuming the database enforces foreign keys, this won't fail. rel_obj = qs.get() if not self.field.rel.multiple: setattr(rel_obj, self.field.related.get_cache_name(), instance) setattr(instance, self.cache_name, rel_obj) if rel_obj is None and not self.field.null: raise self.RelatedObjectDoesNotExist( "%s has no %s." % (self.field.model.__name__, self.field.name) ) else: return rel_obj def __set__(self, instance, value): # If null=True, we can assign null here, but otherwise the value needs # to be an instance of the related class. if value is None and self.field.null is False: raise ValueError( 'Cannot assign None: "%s.%s" does not allow null values.' % (instance._meta.object_name, self.field.name) ) elif value is not None and not isinstance(value, self.field.rel.to): raise ValueError( 'Cannot assign "%r": "%s.%s" must be a "%s" instance.' 
% ( value, instance._meta.object_name, self.field.name, self.field.rel.to._meta.object_name, ) ) elif value is not None: if instance._state.db is None: instance._state.db = router.db_for_write(instance.__class__, instance=value) elif value._state.db is None: value._state.db = router.db_for_write(value.__class__, instance=instance) elif value._state.db is not None and instance._state.db is not None: if not router.allow_relation(value, instance): raise ValueError('Cannot assign "%r": the current database router prevents this relation.' % value) # If we're setting the value of a OneToOneField to None, we need to clear # out the cache on any old related object. Otherwise, deleting the # previously-related object will also cause this object to be deleted, # which is wrong. if value is None: # Look up the previously-related object, which may still be available # since we've not yet cleared out the related field. # Use the cache directly, instead of the accessor; if we haven't # populated the cache, then we don't care - we're only accessing # the object to invalidate the accessor cache, so there's no # need to populate the cache just to expire it again. related = getattr(instance, self.cache_name, None) # If we've got an old related object, we need to clear out its # cache. This cache also might not exist if the related object # hasn't been accessed yet. if related is not None: setattr(related, self.field.related.get_cache_name(), None) # Set the value of the related field for lh_field, rh_field in self.field.related_fields: try: setattr(instance, lh_field.attname, getattr(value, rh_field.attname)) except AttributeError: setattr(instance, lh_field.attname, None) # Since we already know what the related object is, seed the related # object caches now, too. This avoids another db hit if you get the # object you just set. setattr(instance, self.cache_name, value) if value is not None and not self.field.rel.multiple: setattr(value, self.field.related.get_cache_name(), instance) def create_foreign_related_manager(superclass, rel_field, rel_model): class RelatedManager(superclass): def __init__(self, instance): super(RelatedManager, self).__init__() self.instance = instance self.core_filters = {'%s__exact' % rel_field.name: instance} self.model = rel_model def __call__(self, **kwargs): # We use **kwargs rather than a kwarg argument to enforce the # `manager='manager_name'` syntax. 
manager = getattr(self.model, kwargs.pop('manager')) manager_class = create_foreign_related_manager(manager.__class__, rel_field, rel_model) return manager_class(self.instance) do_not_call_in_templates = True def get_queryset(self): try: return self.instance._prefetched_objects_cache[rel_field.related_query_name()] except (AttributeError, KeyError): db = self._db or router.db_for_read(self.model, instance=self.instance) empty_strings_as_null = connections[db].features.interprets_empty_strings_as_nulls qs = super(RelatedManager, self).get_queryset() qs._add_hints(instance=self.instance) if self._db: qs = qs.using(self._db) qs = qs.filter(**self.core_filters) for field in rel_field.foreign_related_fields: val = getattr(self.instance, field.attname) if val is None or (val == '' and empty_strings_as_null): return qs.none() qs._known_related_objects = {rel_field: {self.instance.pk: self.instance}} return qs def get_prefetch_queryset(self, instances, queryset=None): if queryset is None: queryset = super(RelatedManager, self).get_queryset() queryset._add_hints(instance=instances[0]) queryset = queryset.using(queryset._db or self._db) rel_obj_attr = rel_field.get_local_related_value instance_attr = rel_field.get_foreign_related_value instances_dict = dict((instance_attr(inst), inst) for inst in instances) query = {'%s__in' % rel_field.name: instances} queryset = queryset.filter(**query) # Since we just bypassed this class' get_queryset(), we must manage # the reverse relation manually. for rel_obj in queryset: instance = instances_dict[rel_obj_attr(rel_obj)] setattr(rel_obj, rel_field.name, instance) cache_name = rel_field.related_query_name() return queryset, rel_obj_attr, instance_attr, False, cache_name def add(self, *objs): objs = list(objs) db = router.db_for_write(self.model, instance=self.instance) with transaction.commit_on_success_unless_managed( using=db, savepoint=False): for obj in objs: if not isinstance(obj, self.model): raise TypeError("'%s' instance expected, got %r" % (self.model._meta.object_name, obj)) setattr(obj, rel_field.name, self.instance) obj.save() add.alters_data = True def create(self, **kwargs): kwargs[rel_field.name] = self.instance db = router.db_for_write(self.model, instance=self.instance) return super(RelatedManager, self.db_manager(db)).create(**kwargs) create.alters_data = True def get_or_create(self, **kwargs): # Update kwargs with the related object that this # ForeignRelatedObjectsDescriptor knows about. kwargs[rel_field.name] = self.instance db = router.db_for_write(self.model, instance=self.instance) return super(RelatedManager, self.db_manager(db)).get_or_create(**kwargs) get_or_create.alters_data = True # remove() and clear() are only provided if the ForeignKey can have a value of null. if rel_field.null: def remove(self, *objs, **kwargs): if not objs: return bulk = kwargs.pop('bulk', True) val = rel_field.get_foreign_related_value(self.instance) old_ids = set() for obj in objs: # Is obj actually part of this descriptor set? if rel_field.get_local_related_value(obj) == val: old_ids.add(obj.pk) else: raise rel_field.rel.to.DoesNotExist("%r is not related to %r." 
% (obj, self.instance)) self._clear(self.filter(pk__in=old_ids), bulk) remove.alters_data = True def clear(self, **kwargs): bulk = kwargs.pop('bulk', True) self._clear(self, bulk) clear.alters_data = True def _clear(self, queryset, bulk): db = router.db_for_write(self.model, instance=self.instance) queryset = queryset.using(db) if bulk: queryset.update(**{rel_field.name: None}) else: with transaction.commit_on_success_unless_managed(using=db, savepoint=False): for obj in queryset: setattr(obj, rel_field.name, None) obj.save(update_fields=[rel_field.name]) _clear.alters_data = True return RelatedManager class ForeignRelatedObjectsDescriptor(object): # This class provides the functionality that makes the related-object # managers available as attributes on a model class, for fields that have # multiple "remote" values and have a ForeignKey pointed at them by # some other model. In the example "poll.choice_set", the choice_set # attribute is a ForeignRelatedObjectsDescriptor instance. def __init__(self, related): self.related = related # RelatedObject instance def __get__(self, instance, instance_type=None): if instance is None: return self return self.related_manager_cls(instance) def __set__(self, instance, value): manager = self.__get__(instance) # If the foreign key can support nulls, then completely clear the related set. # Otherwise, just move the named objects into the set. if self.related.field.null: manager.clear() manager.add(*value) @cached_property def related_manager_cls(self): # Dynamically create a class that subclasses the related model's default # manager. return create_foreign_related_manager( self.related.model._default_manager.__class__, self.related.field, self.related.model, ) def create_many_related_manager(superclass, rel): """Creates a manager that subclasses 'superclass' (which is a Manager) and adds behavior for many-to-many related objects.""" class ManyRelatedManager(superclass): def __init__(self, model=None, query_field_name=None, instance=None, symmetrical=None, source_field_name=None, target_field_name=None, reverse=False, through=None, prefetch_cache_name=None): super(ManyRelatedManager, self).__init__() self.model = model self.query_field_name = query_field_name source_field = through._meta.get_field(source_field_name) source_related_fields = source_field.related_fields self.core_filters = {} for lh_field, rh_field in source_related_fields: self.core_filters['%s__%s' % (query_field_name, rh_field.name)] = getattr(instance, rh_field.attname) self.instance = instance self.symmetrical = symmetrical self.source_field = source_field self.target_field = through._meta.get_field(target_field_name) self.source_field_name = source_field_name self.target_field_name = target_field_name self.reverse = reverse self.through = through self.prefetch_cache_name = prefetch_cache_name self.related_val = source_field.get_foreign_related_value(instance) if None in self.related_val: raise ValueError('"%r" needs to have a value for field "%s" before ' 'this many-to-many relationship can be used.' % (instance, source_field_name)) # Even if this relation is not to pk, we require still pk value. # The wish is that the instance has been already saved to DB, # although having a pk value isn't a guarantee of that. if instance.pk is None: raise ValueError("%r instance needs to have a primary key value before " "a many-to-many relationship can be used." 
% instance.__class__.__name__) def __call__(self, **kwargs): # We use **kwargs rather than a kwarg argument to enforce the # `manager='manager_name'` syntax. manager = getattr(self.model, kwargs.pop('manager')) manager_class = create_many_related_manager(manager.__class__, rel) return manager_class( model=self.model, query_field_name=self.query_field_name, instance=self.instance, symmetrical=self.symmetrical, source_field_name=self.source_field_name, target_field_name=self.target_field_name, reverse=self.reverse, through=self.through, prefetch_cache_name=self.prefetch_cache_name, ) do_not_call_in_templates = True def _build_remove_filters(self, removed_vals): filters = Q(**{self.source_field_name: self.related_val}) # No need to add a subquery condition if removed_vals is a QuerySet without # filters. removed_vals_filters = (not isinstance(removed_vals, QuerySet) or removed_vals._has_filters()) if removed_vals_filters: filters &= Q(**{'%s__in' % self.target_field_name: removed_vals}) if self.symmetrical: symmetrical_filters = Q(**{self.target_field_name: self.related_val}) if removed_vals_filters: symmetrical_filters &= Q( **{'%s__in' % self.source_field_name: removed_vals}) filters |= symmetrical_filters return filters def get_queryset(self): try: return self.instance._prefetched_objects_cache[self.prefetch_cache_name] except (AttributeError, KeyError): qs = super(ManyRelatedManager, self).get_queryset() qs._add_hints(instance=self.instance) if self._db: qs = qs.using(self._db) return qs._next_is_sticky().filter(**self.core_filters) def get_prefetch_queryset(self, instances, queryset=None): if queryset is None: queryset = super(ManyRelatedManager, self).get_queryset() queryset._add_hints(instance=instances[0]) queryset = queryset.using(queryset._db or self._db) query = {'%s__in' % self.query_field_name: instances} queryset = queryset._next_is_sticky().filter(**query) # M2M: need to annotate the query in order to get the primary model # that the secondary model was actually related to. We know that # there will already be a join on the join table, so we can just add # the select. # For non-autocreated 'through' models, can't assume we are # dealing with PK values. fk = self.through._meta.get_field(self.source_field_name) join_table = self.through._meta.db_table connection = connections[queryset.db] qn = connection.ops.quote_name queryset = queryset.extra(select=dict( ('_prefetch_related_val_%s' % f.attname, '%s.%s' % (qn(join_table), qn(f.column))) for f in fk.local_related_fields)) return (queryset, lambda result: tuple(getattr(result, '_prefetch_related_val_%s' % f.attname) for f in fk.local_related_fields), lambda inst: tuple(getattr(inst, f.attname) for f in fk.foreign_related_fields), False, self.prefetch_cache_name) def add(self, *objs): if not rel.through._meta.auto_created: opts = self.through._meta raise AttributeError( "Cannot use add() on a ManyToManyField which specifies an intermediary model. Use %s.%s's Manager instead." % (opts.app_label, opts.object_name) ) self._add_items(self.source_field_name, self.target_field_name, *objs) # If this is a symmetrical m2m relation to self, add the mirror entry in the m2m table if self.symmetrical: self._add_items(self.target_field_name, self.source_field_name, *objs) add.alters_data = True def remove(self, *objs): if not rel.through._meta.auto_created: opts = self.through._meta raise AttributeError( "Cannot use remove() on a ManyToManyField which specifies an intermediary model. Use %s.%s's Manager instead." 
% (opts.app_label, opts.object_name) ) self._remove_items(self.source_field_name, self.target_field_name, *objs) remove.alters_data = True def clear(self): db = router.db_for_write(self.through, instance=self.instance) signals.m2m_changed.send(sender=self.through, action="pre_clear", instance=self.instance, reverse=self.reverse, model=self.model, pk_set=None, using=db) filters = self._build_remove_filters(super(ManyRelatedManager, self).get_queryset().using(db)) self.through._default_manager.using(db).filter(filters).delete() signals.m2m_changed.send(sender=self.through, action="post_clear", instance=self.instance, reverse=self.reverse, model=self.model, pk_set=None, using=db) clear.alters_data = True def create(self, **kwargs): # This check needs to be done here, since we can't later remove this # from the method lookup table, as we do with add and remove. if not self.through._meta.auto_created: opts = self.through._meta raise AttributeError( "Cannot use create() on a ManyToManyField which specifies an intermediary model. Use %s.%s's Manager instead." % (opts.app_label, opts.object_name) ) db = router.db_for_write(self.instance.__class__, instance=self.instance) new_obj = super(ManyRelatedManager, self.db_manager(db)).create(**kwargs) self.add(new_obj) return new_obj create.alters_data = True def get_or_create(self, **kwargs): db = router.db_for_write(self.instance.__class__, instance=self.instance) obj, created = \ super(ManyRelatedManager, self.db_manager(db)).get_or_create(**kwargs) # We only need to add() if created because if we got an object back # from get() then the relationship already exists. if created: self.add(obj) return obj, created get_or_create.alters_data = True def _add_items(self, source_field_name, target_field_name, *objs): # source_field_name: the PK fieldname in join table for the source object # target_field_name: the PK fieldname in join table for the target object # *objs - objects to add. Either object instances, or primary keys of object instances. # If there aren't any objects, there is nothing to do. from django.db.models import Model if objs: new_ids = set() for obj in objs: if isinstance(obj, self.model): if not router.allow_relation(obj, self.instance): raise ValueError( 'Cannot add "%r": instance is on database "%s", value is on database "%s"' % (obj, self.instance._state.db, obj._state.db) ) fk_val = self.through._meta.get_field( target_field_name).get_foreign_related_value(obj)[0] if fk_val is None: raise ValueError( 'Cannot add "%r": the value for field "%s" is None' % (obj, target_field_name) ) new_ids.add(fk_val) elif isinstance(obj, Model): raise TypeError( "'%s' instance expected, got %r" % (self.model._meta.object_name, obj) ) else: new_ids.add(obj) db = router.db_for_write(self.through, instance=self.instance) vals = self.through._default_manager.using(db).values_list(target_field_name, flat=True) vals = vals.filter(**{ source_field_name: self.related_val[0], '%s__in' % target_field_name: new_ids, }) new_ids = new_ids - set(vals) if self.reverse or source_field_name == self.source_field_name: # Don't send the signal when we are inserting the # duplicate data row for symmetrical reverse entries. 
signals.m2m_changed.send(sender=self.through, action='pre_add', instance=self.instance, reverse=self.reverse, model=self.model, pk_set=new_ids, using=db) # Add the ones that aren't there already self.through._default_manager.using(db).bulk_create([ self.through(**{ '%s_id' % source_field_name: self.related_val[0], '%s_id' % target_field_name: obj_id, }) for obj_id in new_ids ]) if self.reverse or source_field_name == self.source_field_name: # Don't send the signal when we are inserting the # duplicate data row for symmetrical reverse entries. signals.m2m_changed.send(sender=self.through, action='post_add', instance=self.instance, reverse=self.reverse, model=self.model, pk_set=new_ids, using=db) def _remove_items(self, source_field_name, target_field_name, *objs): # source_field_name: the PK colname in join table for the source object # target_field_name: the PK colname in join table for the target object # *objs - objects to remove if not objs: return # Check that all the objects are of the right type old_ids = set() for obj in objs: if isinstance(obj, self.model): fk_val = self.target_field.get_foreign_related_value(obj)[0] old_ids.add(fk_val) else: old_ids.add(obj) db = router.db_for_write(self.through, instance=self.instance) # Send a signal to the other end if need be. signals.m2m_changed.send(sender=self.through, action="pre_remove", instance=self.instance, reverse=self.reverse, model=self.model, pk_set=old_ids, using=db) target_model_qs = super(ManyRelatedManager, self).get_queryset() if target_model_qs._has_filters(): old_vals = target_model_qs.using(db).filter(**{ '%s__in' % self.target_field.related_field.attname: old_ids}) else: old_vals = old_ids filters = self._build_remove_filters(old_vals) self.through._default_manager.using(db).filter(filters).delete() signals.m2m_changed.send(sender=self.through, action="post_remove", instance=self.instance, reverse=self.reverse, model=self.model, pk_set=old_ids, using=db) return ManyRelatedManager class ManyRelatedObjectsDescriptor(object): # This class provides the functionality that makes the related-object # managers available as attributes on a model class, for fields that have # multiple "remote" values and have a ManyToManyField pointed at them by # some other model (rather than having a ManyToManyField themselves). # In the example "publication.article_set", the article_set attribute is a # ManyRelatedObjectsDescriptor instance. def __init__(self, related): self.related = related # RelatedObject instance @cached_property def related_manager_cls(self): # Dynamically create a class that subclasses the related # model's default manager. return create_many_related_manager( self.related.model._default_manager.__class__, self.related.field.rel ) def __get__(self, instance, instance_type=None): if instance is None: return self rel_model = self.related.model manager = self.related_manager_cls( model=rel_model, query_field_name=self.related.field.name, prefetch_cache_name=self.related.field.related_query_name(), instance=instance, symmetrical=False, source_field_name=self.related.field.m2m_reverse_field_name(), target_field_name=self.related.field.m2m_field_name(), reverse=True, through=self.related.field.rel.through, ) return manager def __set__(self, instance, value): if not self.related.field.rel.through._meta.auto_created: opts = self.related.field.rel.through._meta raise AttributeError("Cannot set values on a ManyToManyField which specifies an intermediary model. Use %s.%s's Manager instead." 
% (opts.app_label, opts.object_name)) manager = self.__get__(instance) manager.clear() manager.add(*value) class ReverseManyRelatedObjectsDescriptor(object): # This class provides the functionality that makes the related-object # managers available as attributes on a model class, for fields that have # multiple "remote" values and have a ManyToManyField defined in their # model (rather than having another model pointed *at* them). # In the example "article.publications", the publications attribute is a # ReverseManyRelatedObjectsDescriptor instance. def __init__(self, m2m_field): self.field = m2m_field @property def through(self): # through is provided so that you have easy access to the through # model (Book.authors.through) for inlines, etc. This is done as # a property to ensure that the fully resolved value is returned. return self.field.rel.through @cached_property def related_manager_cls(self): # Dynamically create a class that subclasses the related model's # default manager. return create_many_related_manager( self.field.rel.to._default_manager.__class__, self.field.rel ) def __get__(self, instance, instance_type=None): if instance is None: return self manager = self.related_manager_cls( model=self.field.rel.to, query_field_name=self.field.related_query_name(), prefetch_cache_name=self.field.name, instance=instance, symmetrical=self.field.rel.symmetrical, source_field_name=self.field.m2m_field_name(), target_field_name=self.field.m2m_reverse_field_name(), reverse=False, through=self.field.rel.through, ) return manager def __set__(self, instance, value): if not self.field.rel.through._meta.auto_created: opts = self.field.rel.through._meta raise AttributeError("Cannot set values on a ManyToManyField which specifies an intermediary model. Use %s.%s's Manager instead." % (opts.app_label, opts.object_name)) manager = self.__get__(instance) # clear() can change expected output of 'value' queryset, we force evaluation # of queryset before clear; ticket #19816 value = tuple(value) manager.clear() manager.add(*value) class ForeignObjectRel(object): def __init__(self, field, to, related_name=None, limit_choices_to=None, parent_link=False, on_delete=None, related_query_name=None): try: to._meta except AttributeError: # to._meta doesn't exist, so it must be RECURSIVE_RELATIONSHIP_CONSTANT assert isinstance(to, six.string_types), "'to' must be either a model, a model name or the string %r" % RECURSIVE_RELATIONSHIP_CONSTANT self.field = field self.to = to self.related_name = related_name self.related_query_name = related_query_name self.limit_choices_to = {} if limit_choices_to is None else limit_choices_to self.multiple = True self.parent_link = parent_link self.on_delete = on_delete def is_hidden(self): "Should the related object be hidden?" return self.related_name and self.related_name[-1] == '+' def get_joining_columns(self): return self.field.get_reverse_joining_columns() def get_extra_restriction(self, where_class, alias, related_alias): return self.field.get_extra_restriction(where_class, related_alias, alias) def set_field_name(self): """ Sets the related field's name, this is not available until later stages of app loading, so set_field_name is called from set_attributes_from_rel() """ # By default foreign object doesn't relate to any remote field (for # example custom multicolumn joins currently have no remote field). 
self.field_name = None def get_lookup_constraint(self, constraint_class, alias, targets, sources, lookup_type, raw_value): return self.field.get_lookup_constraint(constraint_class, alias, targets, sources, lookup_type, raw_value) class ManyToOneRel(ForeignObjectRel): def __init__(self, field, to, field_name, related_name=None, limit_choices_to=None, parent_link=False, on_delete=None, related_query_name=None): super(ManyToOneRel, self).__init__( field, to, related_name=related_name, limit_choices_to=limit_choices_to, parent_link=parent_link, on_delete=on_delete, related_query_name=related_query_name) self.field_name = field_name def get_related_field(self): """ Returns the Field in the 'to' object to which this relationship is tied. """ data = self.to._meta.get_field_by_name(self.field_name) if not data[2]: raise FieldDoesNotExist("No related field named '%s'" % self.field_name) return data[0] def set_field_name(self): self.field_name = self.field_name or self.to._meta.pk.name class OneToOneRel(ManyToOneRel): def __init__(self, field, to, field_name, related_name=None, limit_choices_to=None, parent_link=False, on_delete=None, related_query_name=None): super(OneToOneRel, self).__init__(field, to, field_name, related_name=related_name, limit_choices_to=limit_choices_to, parent_link=parent_link, on_delete=on_delete, related_query_name=related_query_name) self.multiple = False class ManyToManyRel(object): def __init__(self, to, related_name=None, limit_choices_to=None, symmetrical=True, through=None, db_constraint=True, related_query_name=None): if through and not db_constraint: raise ValueError("Can't supply a through model and db_constraint=False") self.to = to self.related_name = related_name self.related_query_name = related_query_name if limit_choices_to is None: limit_choices_to = {} self.limit_choices_to = limit_choices_to self.symmetrical = symmetrical self.multiple = True self.through = through self.db_constraint = db_constraint def is_hidden(self): "Should the related object be hidden?" return self.related_name and self.related_name[-1] == '+' def get_related_field(self): """ Returns the field in the to' object to which this relationship is tied (this is always the primary key on the target model). Provided for symmetry with ManyToOneRel. 
""" return self.to._meta.pk class ForeignObject(RelatedField): requires_unique_target = True generate_reverse_relation = True related_accessor_class = ForeignRelatedObjectsDescriptor def __init__(self, to, from_fields, to_fields, swappable=True, **kwargs): self.from_fields = from_fields self.to_fields = to_fields self.swappable = swappable if 'rel' not in kwargs: kwargs['rel'] = ForeignObjectRel( self, to, related_name=kwargs.pop('related_name', None), related_query_name=kwargs.pop('related_query_name', None), limit_choices_to=kwargs.pop('limit_choices_to', None), parent_link=kwargs.pop('parent_link', False), on_delete=kwargs.pop('on_delete', CASCADE), ) kwargs['verbose_name'] = kwargs.get('verbose_name', None) super(ForeignObject, self).__init__(**kwargs) def check(self, **kwargs): errors = super(ForeignObject, self).check(**kwargs) errors.extend(self._check_unique_target()) return errors def _check_unique_target(self): rel_is_string = isinstance(self.rel.to, six.string_types) if rel_is_string or not self.requires_unique_target: return [] # Skip if the try: self.foreign_related_fields except FieldDoesNotExist: return [] try: self.related except AttributeError: return [] has_unique_field = any(rel_field.unique for rel_field in self.foreign_related_fields) if not has_unique_field and len(self.foreign_related_fields) > 1: field_combination = ','.join(rel_field.name for rel_field in self.foreign_related_fields) model_name = self.rel.to.__name__ return [ checks.Error( ('No unique=True constraint ' 'on field combination "%s" under model %s.') % (field_combination, model_name), hint=('Set unique=True argument on any of the fields ' '"%s" under model %s.') % (field_combination, model_name), obj=self, id='E018', ) ] elif not has_unique_field: field_name = self.foreign_related_fields[0].name model_name = self.rel.to.__name__ return [ checks.Error( ('%s.%s must have unique=True ' 'because it is referenced by a foreign key.') % (model_name, field_name), hint=None, obj=self, id='E019', ) ] else: return [] def deconstruct(self): name, path, args, kwargs = super(ForeignObject, self).deconstruct() kwargs['from_fields'] = self.from_fields kwargs['to_fields'] = self.to_fields # Work out string form of "to" if isinstance(self.rel.to, six.string_types): kwargs['to'] = self.rel.to else: kwargs['to'] = "%s.%s" % (self.rel.to._meta.app_label, self.rel.to._meta.object_name) # If swappable is True, then see if we're actually pointing to the target # of a swap. 
swappable_setting = self.swappable_setting if swappable_setting is not None: # If it's already a settings reference, error if hasattr(kwargs['to'], "setting_name"): if kwargs['to'].setting_name != swappable_setting: raise ValueError("Cannot deconstruct a ForeignKey pointing to a model that is swapped in place of more than one model (%s and %s)" % (kwargs['to'].setting_name, swappable_setting)) # Set it from django.db.migrations.writer import SettingsReference kwargs['to'] = SettingsReference( kwargs['to'], swappable_setting, ) return name, path, args, kwargs def resolve_related_fields(self): if len(self.from_fields) < 1 or len(self.from_fields) != len(self.to_fields): raise ValueError('Foreign Object from and to fields must be the same non-zero length') if isinstance(self.rel.to, six.string_types): raise ValueError('Related model %r cannot be resolved' % self.rel.to) related_fields = [] for index in range(len(self.from_fields)): from_field_name = self.from_fields[index] to_field_name = self.to_fields[index] from_field = (self if from_field_name == 'self' else self.opts.get_field_by_name(from_field_name)[0]) to_field = (self.rel.to._meta.pk if to_field_name is None else self.rel.to._meta.get_field_by_name(to_field_name)[0]) related_fields.append((from_field, to_field)) return related_fields @property def related_fields(self): if not hasattr(self, '_related_fields'): self._related_fields = self.resolve_related_fields() return self._related_fields @property def reverse_related_fields(self): return [(rhs_field, lhs_field) for lhs_field, rhs_field in self.related_fields] @property def local_related_fields(self): return tuple(lhs_field for lhs_field, rhs_field in self.related_fields) @property def foreign_related_fields(self): return tuple(rhs_field for lhs_field, rhs_field in self.related_fields) def get_local_related_value(self, instance): return self.get_instance_value_for_fields(instance, self.local_related_fields) def get_foreign_related_value(self, instance): return self.get_instance_value_for_fields(instance, self.foreign_related_fields) @staticmethod def get_instance_value_for_fields(instance, fields): ret = [] for field in fields: # Gotcha: in some cases (like fixture loading) a model can have # different values in parent_ptr_id and parent's id. So, use # instance.pk (that is, parent_ptr_id) when asked for instance.id. opts = instance._meta if field.primary_key: possible_parent_link = opts.get_ancestor_link(field.model) if not possible_parent_link or possible_parent_link.primary_key: ret.append(instance.pk) continue ret.append(getattr(instance, field.attname)) return tuple(ret) def get_attname_column(self): attname, column = super(ForeignObject, self).get_attname_column() return attname, None def get_joining_columns(self, reverse_join=False): source = self.reverse_related_fields if reverse_join else self.related_fields return tuple((lhs_field.column, rhs_field.column) for lhs_field, rhs_field in source) def get_reverse_joining_columns(self): return self.get_joining_columns(reverse_join=True) def get_extra_descriptor_filter(self, instance): """ Returns an extra filter condition for related object fetching when user does 'instance.fieldname', that is the extra filter is used in the descriptor of the field. The filter should be either a dict usable in .filter(**kwargs) call or a Q-object. The condition will be ANDed together with the relation's joining columns. A parallel method is get_extra_restriction() which is used in JOIN and subquery conditions. 
""" return {} def get_extra_restriction(self, where_class, alias, related_alias): """ Returns a pair condition used for joining and subquery pushdown. The condition is something that responds to as_sql(qn, connection) method. Note that currently referring both the 'alias' and 'related_alias' will not work in some conditions, like subquery pushdown. A parallel method is get_extra_descriptor_filter() which is used in instance.fieldname related object fetching. """ return None def get_path_info(self): """ Get path from this field to the related model. """ opts = self.rel.to._meta from_opts = self.model._meta return [PathInfo(from_opts, opts, self.foreign_related_fields, self, False, True)] def get_reverse_path_info(self): """ Get path from the related model to this field's model. """ opts = self.model._meta from_opts = self.rel.to._meta pathinfos = [PathInfo(from_opts, opts, (opts.pk,), self.rel, not self.unique, False)] return pathinfos def get_lookup_constraint(self, constraint_class, alias, targets, sources, lookups, raw_value): from django.db.models.sql.where import SubqueryConstraint, AND, OR root_constraint = constraint_class() assert len(targets) == len(sources) if len(lookups) > 1: raise exceptions.FieldError('Relation fields do not support nested lookups') lookup_type = lookups[0] def get_normalized_value(value): from django.db.models import Model if isinstance(value, Model): value_list = [] for source in sources: # Account for one-to-one relations when sent a different model while not isinstance(value, source.model) and source.rel: source = source.rel.to._meta.get_field(source.rel.field_name) value_list.append(getattr(value, source.attname)) return tuple(value_list) elif not isinstance(value, tuple): return (value,) return value is_multicolumn = len(self.related_fields) > 1 if (hasattr(raw_value, '_as_sql') or hasattr(raw_value, 'get_compiler')): root_constraint.add(SubqueryConstraint(alias, [target.column for target in targets], [source.name for source in sources], raw_value), AND) elif lookup_type == 'isnull': root_constraint.add(IsNull(Col(alias, targets[0], sources[0]), raw_value), AND) elif (lookup_type == 'exact' or (lookup_type in ['gt', 'lt', 'gte', 'lte'] and not is_multicolumn)): value = get_normalized_value(raw_value) for target, source, val in zip(targets, sources, value): lookup_class = target.get_lookup(lookup_type) root_constraint.add( lookup_class(Col(alias, target, source), val), AND) elif lookup_type in ['range', 'in'] and not is_multicolumn: values = [get_normalized_value(value) for value in raw_value] value = [val[0] for val in values] lookup_class = targets[0].get_lookup(lookup_type) root_constraint.add(lookup_class(Col(alias, targets[0], sources[0]), value), AND) elif lookup_type == 'in': values = [get_normalized_value(value) for value in raw_value] for value in values: value_constraint = constraint_class() for source, target, val in zip(sources, targets, value): lookup_class = target.get_lookup('exact') lookup = lookup_class(Col(alias, target, source), val) value_constraint.add(lookup, AND) root_constraint.add(value_constraint, OR) else: raise TypeError('Related Field got invalid lookup: %s' % lookup_type) return root_constraint @property def attnames(self): return tuple(field.attname for field in self.local_related_fields) def get_defaults(self): return tuple(field.get_default() for field in self.local_related_fields) def contribute_to_class(self, cls, name, virtual_only=False): super(ForeignObject, self).contribute_to_class(cls, name, 
virtual_only=virtual_only) setattr(cls, self.name, ReverseSingleRelatedObjectDescriptor(self)) def contribute_to_related_class(self, cls, related): # Internal FK's - i.e., those with a related name ending with '+' - # and swapped models don't get a related descriptor. if not self.rel.is_hidden() and not related.model._meta.swapped: setattr(cls, related.get_accessor_name(), self.related_accessor_class(related)) # While 'limit_choices_to' might be a callable, simply pass # it along for later - this is too early because it's still # model load time. if self.rel.limit_choices_to: cls._meta.related_fkey_lookups.append(self.rel.limit_choices_to) class ForeignKey(ForeignObject): empty_strings_allowed = False default_error_messages = { 'invalid': _('%(model)s instance with pk %(pk)r does not exist.') } description = _("Foreign Key (type determined by related field)") def __init__(self, to, to_field=None, rel_class=ManyToOneRel, db_constraint=True, **kwargs): try: to._meta.model_name except AttributeError: # to._meta doesn't exist, so it must be RECURSIVE_RELATIONSHIP_CONSTANT assert isinstance(to, six.string_types), "%s(%r) is invalid. First parameter to ForeignKey must be either a model, a model name, or the string %r" % (self.__class__.__name__, to, RECURSIVE_RELATIONSHIP_CONSTANT) else: # For backwards compatibility purposes, we need to *try* and set # the to_field during FK construction. It won't be guaranteed to # be correct until contribute_to_class is called. Refs #12190. to_field = to_field or (to._meta.pk and to._meta.pk.name) if 'db_index' not in kwargs: kwargs['db_index'] = True self.db_constraint = db_constraint kwargs['rel'] = rel_class( self, to, to_field, related_name=kwargs.pop('related_name', None), related_query_name=kwargs.pop('related_query_name', None), limit_choices_to=kwargs.pop('limit_choices_to', None), parent_link=kwargs.pop('parent_link', False), on_delete=kwargs.pop('on_delete', CASCADE), ) super(ForeignKey, self).__init__(to, ['self'], [to_field], **kwargs) def check(self, **kwargs): errors = super(ForeignKey, self).check(**kwargs) errors.extend(self._check_on_delete()) return errors def _check_on_delete(self): on_delete = getattr(self.rel, 'on_delete', None) if on_delete == SET_NULL and not self.null: return [ checks.Error( 'The field specifies on_delete=SET_NULL, but cannot be null.', hint='Set null=True argument on the field.', obj=self, id='E020', ) ] elif on_delete == SET_DEFAULT and not self.has_default(): return [ checks.Error( 'The field specifies on_delete=SET_DEFAULT, but has no default value.', hint=None, obj=self, id='E021', ) ] else: return [] def deconstruct(self): name, path, args, kwargs = super(ForeignKey, self).deconstruct() del kwargs['to_fields'] del kwargs['from_fields'] # Handle the simpler arguments if self.db_index: del kwargs['db_index'] else: kwargs['db_index'] = False if self.db_constraint is not True: kwargs['db_constraint'] = self.db_constraint if self.rel.on_delete is not CASCADE: kwargs['on_delete'] = self.rel.on_delete # Rel needs more work. if self.rel.field_name: kwargs['to_field'] = self.rel.field_name return name, path, args, kwargs @property def related_field(self): return self.foreign_related_fields[0] def get_reverse_path_info(self): """ Get path from the related model to this field's model. 
""" opts = self.model._meta from_opts = self.rel.to._meta pathinfos = [PathInfo(from_opts, opts, (opts.pk,), self.rel, not self.unique, False)] return pathinfos def validate(self, value, model_instance): if self.rel.parent_link: return super(ForeignKey, self).validate(value, model_instance) if value is None: return using = router.db_for_read(model_instance.__class__, instance=model_instance) qs = self.rel.to._default_manager.using(using).filter( **{self.rel.field_name: value} ) qs = qs.complex_filter(self.get_limit_choices_to()) if not qs.exists(): raise exceptions.ValidationError( self.error_messages['invalid'], code='invalid', params={'model': self.rel.to._meta.verbose_name, 'pk': value}, ) def get_attname(self): return '%s_id' % self.name def get_attname_column(self): attname = self.get_attname() column = self.db_column or attname return attname, column def get_validator_unique_lookup_type(self): return '%s__%s__exact' % (self.name, self.related_field.name) def get_default(self): "Here we check if the default value is an object and return the to_field if so." field_default = super(ForeignKey, self).get_default() if isinstance(field_default, self.rel.to): return getattr(field_default, self.related_field.attname) return field_default def get_db_prep_save(self, value, connection): if value is None or (value == '' and (not self.related_field.empty_strings_allowed or connection.features.interprets_empty_strings_as_nulls)): return None else: return self.related_field.get_db_prep_save(value, connection=connection) def value_to_string(self, obj): if not obj: # In required many-to-one fields with only one available choice, # select that one available choice. Note: For SelectFields # we have to check that the length of choices is *2*, not 1, # because SelectFields always have an initial "blank" value. if not self.blank and self.choices: choice_list = self.get_choices_default() if len(choice_list) == 2: return smart_text(choice_list[1][0]) return super(ForeignKey, self).value_to_string(obj) def contribute_to_related_class(self, cls, related): super(ForeignKey, self).contribute_to_related_class(cls, related) if self.rel.field_name is None: self.rel.field_name = cls._meta.pk.name def formfield(self, **kwargs): db = kwargs.pop('using', None) if isinstance(self.rel.to, six.string_types): raise ValueError("Cannot create form field for %r yet, because " "its related model %r has not been loaded yet" % (self.name, self.rel.to)) defaults = { 'form_class': forms.ModelChoiceField, 'queryset': self.rel.to._default_manager.using(db), 'to_field_name': self.rel.field_name, } defaults.update(kwargs) return super(ForeignKey, self).formfield(**defaults) def db_type(self, connection): # The database column type of a ForeignKey is the column type # of the field to which it points. An exception is if the ForeignKey # points to an AutoField/PositiveIntegerField/PositiveSmallIntegerField, # in which case the column type is simply that of an IntegerField. # If the database needs similar types for key fields however, the only # thing we can do is making AutoField an IntegerField. 
rel_field = self.related_field if (isinstance(rel_field, AutoField) or (not connection.features.related_fields_match_type and isinstance(rel_field, (PositiveIntegerField, PositiveSmallIntegerField)))): return IntegerField().db_type(connection=connection) return rel_field.db_type(connection=connection) def db_parameters(self, connection): return {"type": self.db_type(connection), "check": []} class OneToOneField(ForeignKey): """ A OneToOneField is essentially the same as a ForeignKey, with the exception that always carries a "unique" constraint with it and the reverse relation always returns the object pointed to (since there will only ever be one), rather than returning a list. """ related_accessor_class = SingleRelatedObjectDescriptor description = _("One-to-one relationship") def __init__(self, to, to_field=None, **kwargs): kwargs['unique'] = True super(OneToOneField, self).__init__(to, to_field, OneToOneRel, **kwargs) def deconstruct(self): name, path, args, kwargs = super(OneToOneField, self).deconstruct() if "unique" in kwargs: del kwargs['unique'] return name, path, args, kwargs def formfield(self, **kwargs): if self.rel.parent_link: return None return super(OneToOneField, self).formfield(**kwargs) def save_form_data(self, instance, data): if isinstance(data, self.rel.to): setattr(instance, self.name, data) else: setattr(instance, self.attname, data) def create_many_to_many_intermediary_model(field, klass): from django.db import models managed = True if isinstance(field.rel.to, six.string_types) and field.rel.to != RECURSIVE_RELATIONSHIP_CONSTANT: to_model = field.rel.to to = to_model.split('.')[-1] def set_managed(field, model, cls): field.rel.through._meta.managed = model._meta.managed or cls._meta.managed add_lazy_relation(klass, field, to_model, set_managed) elif isinstance(field.rel.to, six.string_types): to = klass._meta.object_name to_model = klass managed = klass._meta.managed else: to = field.rel.to._meta.object_name to_model = field.rel.to managed = klass._meta.managed or to_model._meta.managed name = '%s_%s' % (klass._meta.object_name, field.name) if field.rel.to == RECURSIVE_RELATIONSHIP_CONSTANT or to == klass._meta.object_name: from_ = 'from_%s' % to.lower() to = 'to_%s' % to.lower() else: from_ = klass._meta.model_name to = to.lower() meta = type('Meta', (object,), { 'db_table': field._get_m2m_db_table(klass._meta), 'managed': managed, 'auto_created': klass, 'app_label': klass._meta.app_label, 'db_tablespace': klass._meta.db_tablespace, 'unique_together': (from_, to), 'verbose_name': '%(from)s-%(to)s relationship' % {'from': from_, 'to': to}, 'verbose_name_plural': '%(from)s-%(to)s relationships' % {'from': from_, 'to': to}, 'apps': field.model._meta.apps, }) # Construct and return the new class. return type(str(name), (models.Model,), { 'Meta': meta, '__module__': klass.__module__, from_: models.ForeignKey(klass, related_name='%s+' % name, db_tablespace=field.db_tablespace, db_constraint=field.rel.db_constraint), to: models.ForeignKey(to_model, related_name='%s+' % name, db_tablespace=field.db_tablespace, db_constraint=field.rel.db_constraint) }) class ManyToManyField(RelatedField): description = _("Many-to-many relationship") def __init__(self, to, db_constraint=True, swappable=True, **kwargs): try: to._meta except AttributeError: # to._meta doesn't exist, so it must be RECURSIVE_RELATIONSHIP_CONSTANT assert isinstance(to, six.string_types), "%s(%r) is invalid. 
First parameter to ManyToManyField must be either a model, a model name, or the string %r" % (self.__class__.__name__, to, RECURSIVE_RELATIONSHIP_CONSTANT) # Class names must be ASCII in Python 2.x, so we forcibly coerce it here to break early if there's a problem. to = str(to) kwargs['verbose_name'] = kwargs.get('verbose_name', None) kwargs['rel'] = ManyToManyRel(to, related_name=kwargs.pop('related_name', None), related_query_name=kwargs.pop('related_query_name', None), limit_choices_to=kwargs.pop('limit_choices_to', None), symmetrical=kwargs.pop('symmetrical', to == RECURSIVE_RELATIONSHIP_CONSTANT), through=kwargs.pop('through', None), db_constraint=db_constraint, ) self.swappable = swappable self.db_table = kwargs.pop('db_table', None) if kwargs['rel'].through is not None: assert self.db_table is None, "Cannot specify a db_table if an intermediary model is used." super(ManyToManyField, self).__init__(**kwargs) def check(self, **kwargs): errors = super(ManyToManyField, self).check(**kwargs) errors.extend(self._check_unique(**kwargs)) errors.extend(self._check_relationship_model(**kwargs)) return errors def _check_unique(self, **kwargs): if self.unique: return [ checks.Error( 'ManyToManyFields must not be unique.', hint=None, obj=self, id='E022', ) ] return [] def _check_relationship_model(self, from_model=None, **kwargs): errors = [] if self.rel.through not in apps.get_models(include_auto_created=True): # The relationship model is not installed. errors.append( checks.Error( ('The field specifies a many-to-many relation through model ' '%s, which has not been installed.') % self.rel.through, hint=('Ensure that you did not misspell the model name and ' 'the model is not abstract. Does your INSTALLED_APPS ' 'setting contain the app where %s is defined?') % self.rel.through, obj=self, id='E023', ) ) elif not isinstance(self.rel.through, six.string_types): assert from_model is not None, \ "ManyToManyField with intermediate " \ "tables cannot be checked if you don't pass the model " \ "where the field is attached to." # Set some useful local variables to_model = self.rel.to from_model_name = from_model._meta.object_name if isinstance(to_model, six.string_types): to_model_name = to_model else: to_model_name = to_model._meta.object_name relationship_model_name = self.rel.through._meta.object_name self_referential = from_model == to_model # Check symmetrical attribute. 
if (self_referential and self.rel.symmetrical and not self.rel.through._meta.auto_created): errors.append( checks.Error( 'Many-to-many fields with intermediate tables must not be symmetrical.', hint=None, obj=self, id='E024', ) ) # Count foreign keys in intermediate model if self_referential: seen_self = sum(from_model == getattr(field.rel, 'to', None) for field in self.rel.through._meta.fields) if seen_self > 2: errors.append( checks.Error( ('The model is used as an intermediary model by ' '%s, but it has more than two foreign keys ' 'to %s, which is ambiguous and is not permitted.') % (self, from_model_name), hint=None, obj=self.rel.through, id='E025', ) ) else: # Count foreign keys in relationship model seen_from = sum(from_model == getattr(field.rel, 'to', None) for field in self.rel.through._meta.fields) seen_to = sum(to_model == getattr(field.rel, 'to', None) for field in self.rel.through._meta.fields) if seen_from > 1: errors.append( checks.Error( ('The model is used as an intermediary model by ' '%s, but it has more than one foreign key ' 'to %s, which is ambiguous and is not permitted.') % (self, from_model_name), hint=('If you want to create a recursive relationship, ' 'use ForeignKey("self", symmetrical=False, ' 'through="%s").') % relationship_model_name, obj=self, id='E026', ) ) if seen_to > 1: errors.append( checks.Error( ('The model is used as an intermediary model by ' '%s, but it has more than one foreign key ' 'to %s, which is ambiguous and is not permitted.') % (self, to_model_name), hint=('If you want to create a recursive ' 'relationship, use ForeignKey("self", ' 'symmetrical=False, through="%s").') % relationship_model_name, obj=self, id='E027', ) ) if seen_from == 0 or seen_to == 0: errors.append( checks.Error( ('The model is used as an intermediary model by ' '%s, but it misses a foreign key to %s or %s.') % ( self, from_model_name, to_model_name ), hint=None, obj=self.rel.through, id='E028', ) ) return errors def deconstruct(self): name, path, args, kwargs = super(ManyToManyField, self).deconstruct() # Handle the simpler arguments if self.rel.db_constraint is not True: kwargs['db_constraint'] = self.rel.db_constraint if "help_text" in kwargs: del kwargs['help_text'] # Rel needs more work. if isinstance(self.rel.to, six.string_types): kwargs['to'] = self.rel.to else: kwargs['to'] = "%s.%s" % (self.rel.to._meta.app_label, self.rel.to._meta.object_name) # If swappable is True, then see if we're actually pointing to the target # of a swap. swappable_setting = self.swappable_setting if swappable_setting is not None: # If it's already a settings reference, error if hasattr(kwargs['to'], "setting_name"): if kwargs['to'].setting_name != swappable_setting: raise ValueError("Cannot deconstruct a ManyToManyField pointing to a model that is swapped in place of more than one model (%s and %s)" % (kwargs['to'].setting_name, swappable_setting)) # Set it from django.db.migrations.writer import SettingsReference kwargs['to'] = SettingsReference( kwargs['to'], swappable_setting, ) return name, path, args, kwargs def _get_path_info(self, direct=False): """ Called by both direct an indirect m2m traversal. 
""" pathinfos = [] int_model = self.rel.through linkfield1 = int_model._meta.get_field_by_name(self.m2m_field_name())[0] linkfield2 = int_model._meta.get_field_by_name(self.m2m_reverse_field_name())[0] if direct: join1infos = linkfield1.get_reverse_path_info() join2infos = linkfield2.get_path_info() else: join1infos = linkfield2.get_reverse_path_info() join2infos = linkfield1.get_path_info() pathinfos.extend(join1infos) pathinfos.extend(join2infos) return pathinfos def get_path_info(self): return self._get_path_info(direct=True) def get_reverse_path_info(self): return self._get_path_info(direct=False) def get_choices_default(self): return Field.get_choices(self, include_blank=False) def _get_m2m_db_table(self, opts): "Function that can be curried to provide the m2m table name for this relation" if self.rel.through is not None: return self.rel.through._meta.db_table elif self.db_table: return self.db_table else: return utils.truncate_name('%s_%s' % (opts.db_table, self.name), connection.ops.max_name_length()) def _get_m2m_attr(self, related, attr): "Function that can be curried to provide the source accessor or DB column name for the m2m table" cache_attr = '_m2m_%s_cache' % attr if hasattr(self, cache_attr): return getattr(self, cache_attr) for f in self.rel.through._meta.fields: if hasattr(f, 'rel') and f.rel and f.rel.to == related.model: setattr(self, cache_attr, getattr(f, attr)) return getattr(self, cache_attr) def _get_m2m_reverse_attr(self, related, attr): "Function that can be curried to provide the related accessor or DB column name for the m2m table" cache_attr = '_m2m_reverse_%s_cache' % attr if hasattr(self, cache_attr): return getattr(self, cache_attr) found = False for f in self.rel.through._meta.fields: if hasattr(f, 'rel') and f.rel and f.rel.to == related.parent_model: if related.model == related.parent_model: # If this is an m2m-intermediate to self, # the first foreign key you find will be # the source column. Keep searching for # the second foreign key. if found: setattr(self, cache_attr, getattr(f, attr)) break else: found = True else: setattr(self, cache_attr, getattr(f, attr)) break return getattr(self, cache_attr) def value_to_string(self, obj): data = '' if obj: qs = getattr(obj, self.name).all() data = [instance._get_pk_val() for instance in qs] else: # In required many-to-many fields with only one available choice, # select that one available choice. if not self.blank: choices_list = self.get_choices_default() if len(choices_list) == 1: data = [choices_list[0][0]] return smart_text(data) def contribute_to_class(self, cls, name): # To support multiple relations to self, it's useful to have a non-None # related name on symmetrical relations for internal reasons. The # concept doesn't make a lot of sense externally ("you want me to # specify *what* on my non-reversible relation?!"), so we set it up # automatically. The funky name reduces the chance of an accidental # clash. if self.rel.symmetrical and (self.rel.to == "self" or self.rel.to == cls._meta.object_name): self.rel.related_name = "%s_rel_+" % name super(ManyToManyField, self).contribute_to_class(cls, name) # The intermediate m2m model is not auto created if: # 1) There is a manually specified intermediate, or # 2) The class owning the m2m field is abstract. # 3) The class owning the m2m field has been swapped out. 
if not self.rel.through and not cls._meta.abstract and not cls._meta.swapped: self.rel.through = create_many_to_many_intermediary_model(self, cls) # Add the descriptor for the m2m relation setattr(cls, self.name, ReverseManyRelatedObjectsDescriptor(self)) # Set up the accessor for the m2m table name for the relation self.m2m_db_table = curry(self._get_m2m_db_table, cls._meta) # Populate some necessary rel arguments so that cross-app relations # work correctly. if isinstance(self.rel.through, six.string_types): def resolve_through_model(field, model, cls): field.rel.through = model add_lazy_relation(cls, self, self.rel.through, resolve_through_model) def contribute_to_related_class(self, cls, related): # Internal M2Ms (i.e., those with a related name ending with '+') # and swapped models don't get a related descriptor. if not self.rel.is_hidden() and not related.model._meta.swapped: setattr(cls, related.get_accessor_name(), ManyRelatedObjectsDescriptor(related)) # Set up the accessors for the column names on the m2m table self.m2m_column_name = curry(self._get_m2m_attr, related, 'column') self.m2m_reverse_name = curry(self._get_m2m_reverse_attr, related, 'column') self.m2m_field_name = curry(self._get_m2m_attr, related, 'name') self.m2m_reverse_field_name = curry(self._get_m2m_reverse_attr, related, 'name') get_m2m_rel = curry(self._get_m2m_attr, related, 'rel') self.m2m_target_field_name = lambda: get_m2m_rel().field_name get_m2m_reverse_rel = curry(self._get_m2m_reverse_attr, related, 'rel') self.m2m_reverse_target_field_name = lambda: get_m2m_reverse_rel().field_name def set_attributes_from_rel(self): pass def value_from_object(self, obj): "Returns the value of this field in the given model instance." return getattr(obj, self.attname).all() def save_form_data(self, instance, data): setattr(instance, self.attname, data) def formfield(self, **kwargs): db = kwargs.pop('using', None) defaults = { 'form_class': forms.ModelMultipleChoiceField, 'queryset': self.rel.to._default_manager.using(db), } defaults.update(kwargs) # If initial is passed in, it's a list of related objects, but the # MultipleChoiceField takes a list of IDs. if defaults.get('initial') is not None: initial = defaults['initial'] if callable(initial): initial = initial() defaults['initial'] = [i._get_pk_val() for i in initial] return super(ManyToManyField, self).formfield(**defaults) def db_type(self, connection): # A ManyToManyField is not represented by a single column, # so return None. return None def db_parameters(self, connection): return {"type": None, "check": None}
bsd-3-clause
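The Django descriptors in the related.py record above (ManyRelatedObjectsDescriptor / ReverseManyRelatedObjectsDescriptor) refuse direct assignment when the ManyToManyField uses an explicit intermediary model and point the caller at that model's manager instead. A minimal sketch of what that looks like from the model layer — model and field names are hypothetical, and a configured Django settings module is assumed:

from django.db import models

class Publication(models.Model):
    title = models.CharField(max_length=100)

class Membership(models.Model):
    # Explicit intermediary model carrying an extra column.
    article = models.ForeignKey('Article')
    publication = models.ForeignKey(Publication)
    notes = models.TextField(blank=True)

class Article(models.Model):
    headline = models.CharField(max_length=100)
    publications = models.ManyToManyField(Publication, through=Membership)

# article.publications = [p1, p2]   # AttributeError: use Membership's manager instead
# Membership.objects.create(article=article, publication=p1, notes='front page')
# article.publications.all()        # reading through the related manager still works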
b1-systems/kiwi
kiwi/boot/image/dracut.py
1
6648
# Copyright (c) 2015 SUSE Linux GmbH. All rights reserved. # # This file is part of kiwi. # # kiwi is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # kiwi is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with kiwi. If not, see <http://www.gnu.org/licenses/> # import os import re from collections import namedtuple # project from kiwi.logger import log from kiwi.command import Command from kiwi.system.kernel import Kernel from kiwi.boot.image.base import BootImageBase from kiwi.defaults import Defaults from kiwi.system.profile import Profile from kiwi.system.setup import SystemSetup from kiwi.path import Path from kiwi.exceptions import KiwiDiskBootImageError class BootImageDracut(BootImageBase): """ **Implements creation of dracut boot(initrd) images.** """ def post_init(self): """ Post initialization method Initialize empty list of dracut caller options """ self.dracut_options = [] self.included_files = [] self.included_files_install = [] def include_file(self, filename, install_media=False): """ Include file to dracut boot image :param string filename: file path name """ self.included_files.append('--install') self.included_files.append(filename) if install_media: self.included_files_install.append('--install') self.included_files_install.append(filename) def prepare(self): """ Prepare dracut caller environment * Create kiwi .profile environment to be included in dracut initrd * Setup machine_id(s) to be generic and rebuild by dracut on boot """ profile = Profile(self.xml_state) defaults = Defaults() defaults.to_profile(profile) setup = SystemSetup( self.xml_state, self.boot_root_directory ) setup.import_shell_environment(profile) setup.setup_machine_id() self.dracut_options.append('--install') self.dracut_options.append('/.profile') def create_initrd(self, mbrid=None, basename=None, install_initrd=False): """ Call dracut as chroot operation to create the initrd and move the result into the image build target directory :param object mbrid: unused :param string basename: base initrd file name :param bool install_initrd: installation media initrd """ if self.is_prepared(): log.info('Creating generic dracut initrd archive') kernel_info = Kernel(self.boot_root_directory) kernel_details = kernel_info.get_kernel(raise_on_not_found=True) if basename: dracut_initrd_basename = basename else: dracut_initrd_basename = self.initrd_base_name if install_initrd: included_files = self.included_files_install else: included_files = self.included_files dracut_initrd_basename += '.xz' Command.run( [ 'chroot', self.boot_root_directory, 'dracut', '--force', '--no-hostonly', '--no-hostonly-cmdline', '--xz' ] + self.dracut_options + included_files + [ dracut_initrd_basename, kernel_details.version ] ) Command.run( [ 'mv', os.sep.join( [self.boot_root_directory, dracut_initrd_basename] ), self.target_dir ] ) self.initrd_filename = os.sep.join( [self.target_dir, dracut_initrd_basename] ) def get_boot_names(self): """ Provides kernel and initrd names for kiwi boot image :return: Contains boot_names_type tuple .. 
code:: python boot_names_type( kernel_name='INSTALLED_KERNEL', initrd_name='DRACUT_OUTPUT_NAME' ) :rtype: tuple """ boot_names_type = namedtuple( 'boot_names_type', ['kernel_name', 'initrd_name'] ) kernel = Kernel( self.boot_root_directory ) kernel_info = kernel.get_kernel() if not kernel_info: raise KiwiDiskBootImageError( 'No kernel in boot image tree %s found' % self.boot_root_directory ) dracut_output_format = self._get_dracut_output_file_format() return boot_names_type( kernel_name=kernel_info.name, initrd_name=dracut_output_format.format( kernel_version=kernel_info.version ) ) def _get_dracut_output_file_format(self): """ Unfortunately the dracut initrd output file format varies between the different Linux distributions. Tools like lsinitrd, and also grub2 rely on the initrd output file to be in that format. Thus when kiwi uses dracut the same file format should be used all over the place in order to stay compatible with what the distribution does """ default_outfile_format = 'initramfs-{kernel_version}.img' dracut_search_env = { 'PATH': os.sep.join([self.boot_root_directory, 'usr', 'bin']) } dracut_tool = Path.which( 'dracut', custom_env=dracut_search_env, access_mode=os.X_OK ) if dracut_tool: outfile_expression = r'outfile="/boot/(init.*\$kernel.*)"' with open(dracut_tool) as dracut: outfile = re.findall(outfile_expression, dracut.read())[0] if outfile: return outfile.replace('$kernel', '{kernel_version}') log.warning('Could not detect dracut output file format') log.warning('Using default initrd file name format {0}'.format( default_outfile_format )) return default_outfile_format
gpl-3.0
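The kiwi boot image code above detects the distribution-specific dracut output file name by scraping the installed dracut script for its outfile= assignment. A standalone sketch of just that step, with a made-up sample line standing in for the real script:

import re

# Hypothetical line as it might appear in a distribution's dracut script.
dracut_script_text = 'outfile="/boot/initramfs-$kernel.img"'

matches = re.findall(r'outfile="/boot/(init.*\$kernel.*)"', dracut_script_text)
if matches:
    # 'initramfs-$kernel.img' becomes 'initramfs-{kernel_version}.img'
    outfile_format = matches[0].replace('$kernel', '{kernel_version}')
else:
    outfile_format = 'initramfs-{kernel_version}.img'  # kiwi's fallback format

print(outfile_format.format(kernel_version='4.4.0-default'))
# -> initramfs-4.4.0-default.img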
damdam-s/OpenUpgrade
addons/hw_posbox_homepage/controllers/main.py
99
2009
# -*- coding: utf-8 -*- import logging import os import time from os import listdir import openerp from openerp import http from openerp.http import request from openerp.tools.translate import _ _logger = logging.getLogger(__name__) index_template = """ <!DOCTYPE HTML> <html> <head> <title>Odoo's PosBox</title> <style> body { width: 480px; margin: 60px auto; font-family: sans-serif; text-align: justify; color: #6B6B6B; } </style> </head> <body> <h1>Your PosBox is up and running</h1> <p> The PosBox is a hardware adapter that allows you to use receipt printers and barcode scanners with Odoo's Point of Sale, <b>version 8.0 or later</b>. You can start an <a href='https://www.odoo.com/start'>online free trial</a>, or <a href='https://www.odoo.com/start?download'>download and install</a> it yourself. </p> <p> For more information on how to set up the Point of Sale with the PosBox, please refer to <a href='/hw_proxy/static/doc/manual.pdf'>the manual</a> </p> <p> To see the status of the connected hardware, please refer to the <a href='/hw_proxy/status'>hardware status page</a> </p> <p> The PosBox software installed on this posbox is <b>version 6</b>; the posbox version number is independent of Odoo's. You can upgrade the software on the <a href='/hw_proxy/upgrade/'>upgrade page</a> </p> <p>For any other question, please contact the Odoo support at <a href='mailto:[email protected]'>[email protected]</a> </p> </body> </html> """ class PosboxHomepage(openerp.addons.web.controllers.main.Home): @http.route('/', type='http', auth='none', website=True) def index(self): #return request.render('hw_posbox_homepage.index',mimetype='text/html') return index_template

agpl-3.0
moto-timo/ironpython3
Src/StdLib/Lib/tkinter/font.py
75
6611
# Tkinter font wrapper # # written by Fredrik Lundh, February 1998 # __version__ = "0.9" import itertools import tkinter # weight/slant NORMAL = "normal" ROMAN = "roman" BOLD = "bold" ITALIC = "italic" def nametofont(name): """Given the name of a tk named font, returns a Font representation. """ return Font(name=name, exists=True) class Font: """Represents a named font. Constructor options are: font -- font specifier (name, system font, or (family, size, style)-tuple) name -- name to use for this font configuration (defaults to a unique name) exists -- does a named font by this name already exist? Creates a new named font if False, points to the existing font if True. Raises _tkinter.TclError if the assertion is false. the following are ignored if font is specified: family -- font 'family', e.g. Courier, Times, Helvetica size -- font size in points weight -- font thickness: NORMAL, BOLD slant -- font slant: ROMAN, ITALIC underline -- font underlining: false (0), true (1) overstrike -- font strikeout: false (0), true (1) """ counter = itertools.count(1) def _set(self, kw): options = [] for k, v in kw.items(): options.append("-"+k) options.append(str(v)) return tuple(options) def _get(self, args): options = [] for k in args: options.append("-"+k) return tuple(options) def _mkdict(self, args): options = {} for i in range(0, len(args), 2): options[args[i][1:]] = args[i+1] return options def __init__(self, root=None, font=None, name=None, exists=False, **options): if not root: root = tkinter._default_root tk = getattr(root, 'tk', root) if font: # get actual settings corresponding to the given font font = tk.splitlist(tk.call("font", "actual", font)) else: font = self._set(options) if not name: name = "font" + str(next(self.counter)) self.name = name if exists: self.delete_font = False # confirm font exists if self.name not in tk.splitlist(tk.call("font", "names")): raise tkinter._tkinter.TclError( "named font %s does not already exist" % (self.name,)) # if font config info supplied, apply it if font: tk.call("font", "configure", self.name, *font) else: # create new font (raises TclError if the font exists) tk.call("font", "create", self.name, *font) self.delete_font = True self._tk = tk self._split = tk.splitlist self._call = tk.call def __str__(self): return self.name def __eq__(self, other): return isinstance(other, Font) and self.name == other.name def __getitem__(self, key): return self.cget(key) def __setitem__(self, key, value): self.configure(**{key: value}) def __del__(self): try: if self.delete_font: self._call("font", "delete", self.name) except (KeyboardInterrupt, SystemExit): raise except Exception: pass def copy(self): "Return a distinct copy of the current font" return Font(self._tk, **self.actual()) def actual(self, option=None, displayof=None): "Return actual font attributes" args = () if displayof: args = ('-displayof', displayof) if option: args = args + ('-' + option, ) return self._call("font", "actual", self.name, *args) else: return self._mkdict( self._split(self._call("font", "actual", self.name, *args))) def cget(self, option): "Get font attribute" return self._call("font", "config", self.name, "-"+option) def config(self, **options): "Modify font attributes" if options: self._call("font", "config", self.name, *self._set(options)) else: return self._mkdict( self._split(self._call("font", "config", self.name))) configure = config def measure(self, text, displayof=None): "Return text width" args = (text,) if displayof: args = ('-displayof', displayof, text) return 
int(self._call("font", "measure", self.name, *args)) def metrics(self, *options, **kw): """Return font metrics. For best performance, create a dummy widget using this font before calling this method.""" args = () displayof = kw.pop('displayof', None) if displayof: args = ('-displayof', displayof) if options: args = args + self._get(options) return int( self._call("font", "metrics", self.name, *args)) else: res = self._split(self._call("font", "metrics", self.name, *args)) options = {} for i in range(0, len(res), 2): options[res[i][1:]] = int(res[i+1]) return options def families(root=None, displayof=None): "Get font families (as a tuple)" if not root: root = tkinter._default_root args = () if displayof: args = ('-displayof', displayof) return root.tk.splitlist(root.tk.call("font", "families", *args)) def names(root=None): "Get names of defined fonts (as a tuple)" if not root: root = tkinter._default_root return root.tk.splitlist(root.tk.call("font", "names")) # -------------------------------------------------------------------- # test stuff if __name__ == "__main__": root = tkinter.Tk() # create a font f = Font(family="times", size=30, weight=NORMAL) print(f.actual()) print(f.actual("family")) print(f.actual("weight")) print(f.config()) print(f.cget("family")) print(f.cget("weight")) print(names()) print(f.measure("hello"), f.metrics("linespace")) print(f.metrics(displayof=root)) f = Font(font=("Courier", 20, "bold")) print(f.measure("hello"), f.metrics("linespace", displayof=root)) w = tkinter.Label(root, text="Hello, world", font=f) w.pack() w = tkinter.Button(root, text="Quit!", command=root.destroy) w.pack() fb = Font(font=w["font"]).copy() fb.config(weight=BOLD) w.config(font=fb) tkinter.mainloop()
apache-2.0
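As a small usage sketch for the font module above, beyond its built-in self-test: nametofont gives a handle on one of Tk's standard named fonts, and reconfiguring it restyles the widgets that use it. This assumes a working Tk installation; 'TkDefaultFont' is a standard Tk font name, not something defined in the module:

import tkinter
import tkinter.font as tkfont

root = tkinter.Tk()

# Fetch the standard named font and retune it; widgets using it follow along.
default = tkfont.nametofont("TkDefaultFont")
default.configure(size=12, weight="bold")

label = tkinter.Label(root, text="uses TkDefaultFont")
label.pack()
print(default.actual())
root.mainloop()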
andyraib/data-storage
python_scripts/env/lib/python3.6/site-packages/numpy/f2py/tests/test_callback.py
145
3040
from __future__ import division, absolute_import, print_function import math import textwrap from numpy import array from numpy.testing import run_module_suite, assert_, assert_equal, dec import util class TestF77Callback(util.F2PyTest): code = """ subroutine t(fun,a) integer a cf2py intent(out) a external fun call fun(a) end subroutine func(a) cf2py intent(in,out) a integer a a = a + 11 end subroutine func0(a) cf2py intent(out) a integer a a = 11 end subroutine t2(a) cf2py intent(callback) fun integer a cf2py intent(out) a external fun call fun(a) end subroutine string_callback(callback, a) external callback double precision callback double precision a character*1 r cf2py intent(out) a r = 'r' a = callback(r) end """ @dec.slow def test_all(self): for name in "t,t2".split(","): self.check_function(name) @dec.slow def test_docstring(self): expected = """ a = t(fun,[fun_extra_args]) Wrapper for ``t``. Parameters ---------- fun : call-back function Other Parameters ---------------- fun_extra_args : input tuple, optional Default: () Returns ------- a : int Notes ----- Call-back functions:: def fun(): return a Return objects: a : int """ assert_equal(self.module.t.__doc__, textwrap.dedent(expected).lstrip()) def check_function(self, name): t = getattr(self.module, name) r = t(lambda: 4) assert_(r == 4, repr(r)) r = t(lambda a: 5, fun_extra_args=(6,)) assert_(r == 5, repr(r)) r = t(lambda a: a, fun_extra_args=(6,)) assert_(r == 6, repr(r)) r = t(lambda a: 5 + a, fun_extra_args=(7,)) assert_(r == 12, repr(r)) r = t(lambda a: math.degrees(a), fun_extra_args=(math.pi,)) assert_(r == 180, repr(r)) r = t(math.degrees, fun_extra_args=(math.pi,)) assert_(r == 180, repr(r)) r = t(self.module.func, fun_extra_args=(6,)) assert_(r == 17, repr(r)) r = t(self.module.func0) assert_(r == 11, repr(r)) r = t(self.module.func0._cpointer) assert_(r == 11, repr(r)) class A(object): def __call__(self): return 7 def mth(self): return 9 a = A() r = t(a) assert_(r == 7, repr(r)) r = t(a.mth) assert_(r == 9, repr(r)) def test_string_callback(self): def callback(code): if code == 'r': return 0 else: return 1 f = getattr(self.module, 'string_callback') r = f(callback) assert_(r == 0, repr(r)) if __name__ == "__main__": run_module_suite()
apache-2.0
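Outside the numpy test harness above, the same f2py callback machinery is used by compiling the Fortran and passing plain Python callables where the wrapped routine expects an EXTERNAL. Module and file names below are hypothetical; the expected results mirror the assertions in the test:

# Build step (shell), assuming the Fortran source above is saved as callback.f:
#   f2py -c -m callback_demo callback.f

import math
import callback_demo  # hypothetical module produced by the build step

print(callback_demo.t(lambda: 4))                                # -> 4
print(callback_demo.t(math.degrees, fun_extra_args=(math.pi,)))  # -> 180
print(callback_demo.t(callback_demo.func, fun_extra_args=(6,)))  # -> 17 (6 + 11)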
JohanComparat/pyEmerge
python/remap.py
1
8482
#!/usr/bin/python # # remap.py import sys from math import * verbose = False # Use fast vec3 implementation if Numpy is available import numpy as N class vec3(N.ndarray): """A simple 3D vector class, using Numpy for fast array operations.""" def __new__(cls, *args): a = N.ndarray.__new__(vec3, (3,), float) if len(args) == 0: a[0] = a[1] = a[2] = 0 elif len(args) == 1: v = args[0] a[0] = v[0] a[1] = v[1] a[2] = v[2] elif len(args) == 3: a[0] = args[0] a[1] = args[1] a[2] = args[2] else: raise RuntimeError return a def _getx(self): return self[0] def _gety(self): return self[1] def _getz(self): return self[2] def _setx(self, value): self[0] = value def _sety(self, value): self[1] = value def _setz(self, value): self[2] = value x = property(_getx, _setx) y = property(_gety, _sety) z = property(_getz, _setz) def dot(u, v): return u.x*v.x + u.y*v.y + u.z*v.z def square(v): return v.x**2 + v.y**2 + v.z**2 def length(v): return sqrt(square(v)) def triple_scalar_product(u, v, w): return u.x*(v.y*w.z - v.z*w.y) + u.y*(v.z*w.x - v.x*w.z) + u.z*(v.x*w.y - v.y*w.x) class Plane: def __init__(self, p, n): self.a = n.x self.b = n.y self.c = n.z self.d = -dot(p,n) def normal(self): ell = sqrt(self.a**2 + self.b**2 + self.c**2) return vec3(self.a/ell, self.b/ell, self.c/ell) def test(self, x, y, z): """Compare a point to a plane. Return value is positive, negative, or zero depending on whether the point lies above, below, or on the plane.""" return self.a*x + self.b*y + self.c*z + self.d class Cell: def __init__(self, ix=0, iy=0, iz=0): self.ix = ix self.iy = iy self.iz = iz self.faces = [] def contains(self, x, y, z): for f in self.faces: if f.test(x,y,z) < 0: return False return True def UnitCubeTest(P): """Return +1, 0, or -1 if the unit cube is above, below, or intersecting the plane.""" above = 0 below = 0 for (a,b,c) in [(0,0,0), (0,0,1), (0,1,0), (0,1,1), (1,0,0), (1,0,1), (1,1,0), (1,1,1)]: s = P.test(a, b, c) if s > 0: above = 1 elif s < 0: below = 1 return above - below class Cuboid: """Cuboid remapping class.""" def __init__(self, u1=(1,0,0), u2=(0,1,0), u3=(0,0,1)): """Initialize by passing a 3x3 invertible integer matrix.""" u1 = vec3(u1) u2 = vec3(u2) u3 = vec3(u3) if triple_scalar_product(u1, u2, u3) != 1: print( "!! 
Invalid lattice vectors: u1 = %s, u2 = %s, u3 = %s" % (u1,u2,u3) ) self.e1 = vec3(1,0,0) self.e2 = vec3(0,1,0) self.e3 = vec3(0,0,1) else: s1 = square(u1) s2 = square(u2) d12 = dot(u1, u2) d23 = dot(u2, u3) d13 = dot(u1, u3) alpha = -d12/s1 gamma = -(alpha*d13 + d23)/(alpha*d12 + s2) beta = -(d13 + gamma*d12)/s1 self.e1 = u1 self.e2 = u2 + alpha*u1 self.e3 = u3 + beta*u1 + gamma*u2 if verbose: print( "e1 = %s" % self.e1) print( "e2 = %s" % self.e2) print( "e3 = %s" % self.e3) self.L1 = length(self.e1) self.L2 = length(self.e2) self.L3 = length(self.e3) self.n1 = self.e1/self.L1 self.n2 = self.e2/self.L2 self.n3 = self.e3/self.L3 self.cells = [] v0 = vec3(0,0,0) self.v = [v0, v0 + self.e3, v0 + self.e2, v0 + self.e2 + self.e3, v0 + self.e1, v0 + self.e1 + self.e3, v0 + self.e1 + self.e2, v0 + self.e1 + self.e2 + self.e3] # Compute bounding box of cuboid xs = [vk.x for vk in self.v] ys = [vk.y for vk in self.v] zs = [vk.z for vk in self.v] vmin = vec3(min(xs), min(ys), min(zs)) vmax = vec3(max(xs), max(ys), max(zs)) # Extend to nearest integer coordinates ixmin = int(floor(vmin.x)) ixmax = int(ceil(vmax.x)) iymin = int(floor(vmin.y)) iymax = int(ceil(vmax.y)) izmin = int(floor(vmin.z)) izmax = int(ceil(vmax.z)) if verbose: print( "ixmin, ixmax = %d, %d" % (ixmin,ixmax) ) print( "iymin, iymax = %d, %d" % (iymin,iymax) ) print( "izmin, izmax = %d, %d" % (izmin,izmax) ) # Determine which cells (and which faces within those cells) are non-trivial for ix in range(ixmin, ixmax): for iy in range(iymin, iymax): for iz in range(izmin, izmax): shift = vec3(-ix, -iy, -iz) faces = [Plane(self.v[0] + shift, +self.n1), Plane(self.v[4] + shift, -self.n1), Plane(self.v[0] + shift, +self.n2), Plane(self.v[2] + shift, -self.n2), Plane(self.v[0] + shift, +self.n3), Plane(self.v[1] + shift, -self.n3)] c = Cell(ix, iy, iz) skipcell = False for f in faces: r = UnitCubeTest(f) if r == +1: # Unit cube is completely above this plane; this cell is empty continue elif r == 0: # Unit cube intersects this plane; keep track of it c.faces.append(f) elif r == -1: skipcell = True break if skipcell or len(c.faces) == 0: if verbose: print( "Skipping cell at (%d,%d,%d)" % (ix,iy,iz)) continue else: self.cells.append(c) if verbose: print( "Adding cell at (%d,%d,%d)" % (ix,iy,iz) ) # For the identity remapping, use exactly one cell if len(self.cells) == 0: self.cells.append(Cell()) # Print the full list of cells if verbose: print( "%d non-empty cells" % len(self.cells)) for c in self.cells: print( "Cell at (%d,%d,%d) has %d non-trivial planes" % (c.ix, c.iy, c.iz, len(c.faces)) ) def Transform(self, x, y, z): for c in self.cells: if c.contains(x,y,z): x += c.ix y += c.iy z += c.iz p = vec3(x,y,z) return (dot(p, self.n1), dot(p, self.n2), dot(p, self.n3)) raise RuntimeError( "(%g, %g, %g) not contained in any cell" % (x,y,z) ) def InverseTransform(self, r1, r2, r3): p = r1*self.n1 + r2*self.n2 + r3*self.n3 x1 = fmod(p[0], 1) + (p[0] < 0) x2 = fmod(p[1], 1) + (p[1] < 0) x3 = fmod(p[2], 1) + (p[2] < 0) return vec3(x1, x2, x3) def abort(msg=None, code=1): if msg: print(msg) #>> sys.stderr, msg sys.exit(code) if __name__ == '__main__': # Parse command line arguments params = {} for arg in sys.argv[1:]: pair = arg.split('=', 1) if len(pair) == 2: name, val = pair if name == "m": params['m'] = int(val) elif name == "n": params['n'] = int(val) elif name == "u": params['u'] = [int(f) for f in val.strip("[()]").replace(',', ' ').split()] elif name == "u1": params['u1'] = [int(f) for f in val.strip("[()]").replace(',', ' ').split()] elif 
name == "u2": params['u2'] = [int(f) for f in val.strip("[()]").replace(',', ' ').split()] elif name == "u3": params['u3'] = [int(f) for f in val.strip("[()]").replace(',', ' ').split()] elif name == "in": params['in'] = str(val) elif name == "out": params['out'] = str(val) else: abort("Unrecognized parameter '%s'" % name) else: if arg == "-v" or arg == "--verbose": verbose = True elif arg == "-h" or arg == "--help": print( "Usage: python remap.py [OPTIONS] PARAMS" ) else: abort("Unrecognized option '%s'" % arg) # Open input and output files if 'in' not in params or params['in'] == "stdin": fin = sys.stdin else: fin = open(params['in'], "r") if not fin: abort("Could not open input file '%s'" % params['in']) if 'out' not in params or params['out'] == "stdout": fout = sys.stdout else: fout = open(params['out'], "w") if not fout: abort("!! Could not open output file '%s'" % params['out']) # Initialize remapping if 'm' in params and 'n' in params: m = params['m'] n = params['n'] u1 = (1,m,n) u2 = (0,1,0) u3 = (0,0,1) elif 'u' in params: u = params['u'] if len(u) != 9: abort("!! Input matrix 'u' should have 9 components, not %d" % len(u)) u1 = (u[0], u[1], u[2]) u2 = (u[3], u[4], u[5]) u3 = (u[6], u[7], u[8]) elif 'u1' in params and 'u2' in params and 'u3' in params: u1 = params['u1'] u2 = params['u2'] u3 = params['u3'] else: print( "?? Cuboid geometry not specified, assuming trivial remapping" ) u1 = (1,0,0) u2 = (0,1,0) u3 = (0,0,1) if verbose: print( "u1 = %s, u2 = %s, u3 = %s" % (u1,u2,u3)) C = Cuboid(u1, u2, u3) for line in fin: line = line.strip() if len(line) == 0 or line.startswith('#'): continue coords = line.replace(',', ' ').split() if len(coords) != 3: print("?? Expecting 3 coordinates per line, not '%s'" % line) continue (xin,yin,zin) = float(coords[0]), float(coords[1]), float(coords[2]) (xout,yout,zout) = C.Transform(xin, yin, zin) print( "%e %e %e" % (xout,yout,zout )) fin.close() fout.close()
unlicense
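A minimal usage sketch for the Cuboid class in remap.py above. The lattice vectors are an arbitrary valid choice (their triple scalar product must be 1), and the import assumes remap.py is on the Python path:

from remap import Cuboid

C = Cuboid(u1=(1, 1, 0), u2=(0, 1, 0), u3=(0, 0, 1))

# Map a unit-cube point into cuboid coordinates and back again.
r1, r2, r3 = C.Transform(0.25, 0.50, 0.75)
print(C.L1, C.L2, C.L3)                 # side lengths of the remapped cuboid
print(r1, r2, r3)
print(C.InverseTransform(r1, r2, r3))   # wraps back to a point in the unit cube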
hazelnusse/robot.bicycle
gui/rbgui.py
1
1626
#!/usr/bin/env python3 """ Robot Bicycle GUI. Barebones GUI allows user to run common shell commands with buttons. Oliver Lee ([email protected]) 5 December 2012 """ import sys from PyQt4 import QtGui, QtCore from rbg_shell import * from rbg_widgets import * from rbg_button_page import ButtonPage from rbg_plot_page import PlotPage class MainWindow(QtGui.QMainWindow): reseted = QtCore.pyqtSignal() state_updated = QtCore.pyqtSignal(RbState) def __init__(self): super(MainWindow, self).__init__() tabs = QtGui.QTabWidget() self.statusBar() button_page = ButtonPage() button_page.status_updated.connect(self.update_status) self.shell = button_page.shell tabs.addTab(button_page, "Data Collection and Control") plot_page = PlotPage() tabs.addTab(plot_page, "Data Plots") self.cw = tabs self.init_ui() def init_ui(self): self.setCentralWidget(self.cw) self.resize(1000, 600) qr = self.frameGeometry() #geometry of the main window #center point of monitor cp = QtGui.QDesktopWidget().availableGeometry().center() qr.moveCenter(cp) self.move(qr.topLeft()) self.setWindowTitle('BEEP BEEP BLOOP BLARP') self.show() def update_status(self, text): self.statusBar().showMessage(text) def main(): app = QtGui.QApplication(sys.argv) ex = MainWindow() app.aboutToQuit.connect(ex.shell.disconnect) sys.exit(app.exec_()) if __name__ == '__main__': main()
bsd-2-clause
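The window-centering recipe in init_ui above is reusable on its own; a hedged PyQt4 sketch follows (the helper name is illustrative, not part of the repository).

    from PyQt4 import QtGui

    def center_on_screen(widget):
        # Move the widget's frame so its center coincides with the center of the
        # available desktop geometry -- the same steps as MainWindow.init_ui.
        frame = widget.frameGeometry()
        center = QtGui.QDesktopWidget().availableGeometry().center()
        frame.moveCenter(center)
        widget.move(frame.topLeft())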
kou998/openclas
cpp/script/bom.py
2
1696
#!/usr/bin/env python2.5 """ Strip or append leading byte-order-mark from utf-8 files. """ import sys import os def usage(prog): print >>sys.stderr, 'usage: %s <option>' % prog print >>sys.stderr, '-a, --append : Append UTF-8 BOM as the prefix of all C/C++ files recursivly from current directory' print >>sys.stderr, '-r, --remove : Remove UTF-8 BOM prefix of all C/C++ files recursivly from current directory' sys.exit(1) def bomer(filename, encoding): import codecs infile = codecs.open( filename, "r", "utf-8-sig" ) # 'utf-8-sig' encoding requires Python 2.5 or higher u = infile.read() # Returns a Unicode string from the UTF-8 bytes in the file infile.close() outfile = codecs.open( filename, "w", encoding) outfile.write( u ) outfile.close() def main(prog, *args): from getopt import getopt, GetoptError try: opts, args = getopt(args, 'ar', ['append', 'remove']) except GetoptError: usage(prog) encoding = "utf-8-sig" op_cmd = "Append" for o, a in opts: if o in ("-a", "--append"): encoding = "utf-8-sig" op_cmd = "Append" elif o in ("-r", "--remove"): encoding = "utf-8" op_cmd = "Remove" else: assert False, "unhandled option" if len(args) == 1: op_dir = args[0] else: op_dir = "." print op_cmd, "UTF8 BOM from top directory (", os.path.abspath(op_dir), ") ..." for root, dirs, files in os.walk(op_dir): for name in files: base, ext = os.path.splitext(name) if ext in (".h", ".hpp", ".hxx", ".c", ".cpp", ".cxx"): filename = os.path.join(root, name) bomer(filename, encoding) print 'Converted ', filename if __name__ == '__main__': try: main(*sys.argv) except KeyboardInterrupt: sys.exit(1)
bsd-2-clause
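A short Python 3 sketch of the codec behaviour the script above relies on: 'utf-8-sig' strips a leading BOM when decoding and prepends one when encoding, while plain 'utf-8' leaves the text BOM-free (illustrative only, not part of the repository).

    import codecs

    data = codecs.BOM_UTF8 + "hello".encode("utf-8")

    # Decoding with 'utf-8-sig' drops the BOM if present.
    text = data.decode("utf-8-sig")
    assert text == "hello"

    # Encoding with 'utf-8-sig' adds the BOM back; plain 'utf-8' does not.
    assert text.encode("utf-8-sig") == data
    assert text.encode("utf-8") == b"hello"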
ryanmockabee/golfr
flask/lib/python3.6/site-packages/sqlalchemy/orm/collections.py
32
52410
# orm/collections.py # Copyright (C) 2005-2017 the SQLAlchemy authors and contributors # <see AUTHORS file> # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Support for collections of mapped entities. The collections package supplies the machinery used to inform the ORM of collection membership changes. An instrumentation via decoration approach is used, allowing arbitrary types (including built-ins) to be used as entity collections without requiring inheritance from a base class. Instrumentation decoration relays membership change events to the :class:`.CollectionAttributeImpl` that is currently managing the collection. The decorators observe function call arguments and return values, tracking entities entering or leaving the collection. Two decorator approaches are provided. One is a bundle of generic decorators that map function arguments and return values to events:: from sqlalchemy.orm.collections import collection class MyClass(object): # ... @collection.adds(1) def store(self, item): self.data.append(item) @collection.removes_return() def pop(self): return self.data.pop() The second approach is a bundle of targeted decorators that wrap appropriate append and remove notifiers around the mutation methods present in the standard Python ``list``, ``set`` and ``dict`` interfaces. These could be specified in terms of generic decorator recipes, but are instead hand-tooled for increased efficiency. The targeted decorators occasionally implement adapter-like behavior, such as mapping bulk-set methods (``extend``, ``update``, ``__setslice__``, etc.) into the series of atomic mutation events that the ORM requires. The targeted decorators are used internally for automatic instrumentation of entity collection classes. Every collection class goes through a transformation process roughly like so: 1. If the class is a built-in, substitute a trivial sub-class 2. Is this class already instrumented? 3. Add in generic decorators 4. Sniff out the collection interface through duck-typing 5. Add targeted decoration to any undecorated interface method This process modifies the class at runtime, decorating methods and adding some bookkeeping properties. This isn't possible (or desirable) for built-in classes like ``list``, so trivial sub-classes are substituted to hold decoration:: class InstrumentedList(list): pass Collection classes can be specified in ``relationship(collection_class=)`` as types or a function that returns an instance. Collection classes are inspected and instrumented during the mapper compilation phase. The collection_class callable will be executed once to produce a specimen instance, and the type of that specimen will be instrumented. Functions that return built-in types like ``lists`` will be adapted to produce instrumented instances. When extending a known type like ``list``, additional decorations are not generally not needed. Odds are, the extension method will delegate to a method that's already instrumented. For example:: class QueueIsh(list): def push(self, item): self.append(item) def shift(self): return self.pop(0) There's no need to decorate these methods. ``append`` and ``pop`` are already instrumented as part of the ``list`` interface. Decorating them would fire duplicate events, which should be avoided. The targeted decoration tries not to rely on other methods in the underlying collection class, but some are unavoidable. 
Many depend on 'read' methods being present to properly instrument a 'write', for example, ``__setitem__`` needs ``__getitem__``. "Bulk" methods like ``update`` and ``extend`` may also reimplemented in terms of atomic appends and removes, so the ``extend`` decoration will actually perform many ``append`` operations and not call the underlying method at all. Tight control over bulk operation and the firing of events is also possible by implementing the instrumentation internally in your methods. The basic instrumentation package works under the general assumption that collection mutation will not raise unusual exceptions. If you want to closely orchestrate append and remove events with exception management, internal instrumentation may be the answer. Within your method, ``collection_adapter(self)`` will retrieve an object that you can use for explicit control over triggering append and remove events. The owning object and :class:`.CollectionAttributeImpl` are also reachable through the adapter, allowing for some very sophisticated behavior. """ import inspect import operator import weakref from ..sql import expression from .. import util, exc as sa_exc from . import base from sqlalchemy.util.compat import inspect_getargspec __all__ = ['collection', 'collection_adapter', 'mapped_collection', 'column_mapped_collection', 'attribute_mapped_collection'] __instrumentation_mutex = util.threading.Lock() class _PlainColumnGetter(object): """Plain column getter, stores collection of Column objects directly. Serializes to a :class:`._SerializableColumnGetterV2` which has more expensive __call__() performance and some rare caveats. """ def __init__(self, cols): self.cols = cols self.composite = len(cols) > 1 def __reduce__(self): return _SerializableColumnGetterV2._reduce_from_cols(self.cols) def _cols(self, mapper): return self.cols def __call__(self, value): state = base.instance_state(value) m = base._state_mapper(state) key = [ m._get_state_attr_by_column(state, state.dict, col) for col in self._cols(m) ] if self.composite: return tuple(key) else: return key[0] class _SerializableColumnGetter(object): """Column-based getter used in version 0.7.6 only. Remains here for pickle compatibility with 0.7.6. """ def __init__(self, colkeys): self.colkeys = colkeys self.composite = len(colkeys) > 1 def __reduce__(self): return _SerializableColumnGetter, (self.colkeys,) def __call__(self, value): state = base.instance_state(value) m = base._state_mapper(state) key = [m._get_state_attr_by_column( state, state.dict, m.mapped_table.columns[k]) for k in self.colkeys] if self.composite: return tuple(key) else: return key[0] class _SerializableColumnGetterV2(_PlainColumnGetter): """Updated serializable getter which deals with multi-table mapped classes. Two extremely unusual cases are not supported. Mappings which have tables across multiple metadata objects, or which are mapped to non-Table selectables linked across inheriting mappers may fail to function here. 
""" def __init__(self, colkeys): self.colkeys = colkeys self.composite = len(colkeys) > 1 def __reduce__(self): return self.__class__, (self.colkeys,) @classmethod def _reduce_from_cols(cls, cols): def _table_key(c): if not isinstance(c.table, expression.TableClause): return None else: return c.table.key colkeys = [(c.key, _table_key(c)) for c in cols] return _SerializableColumnGetterV2, (colkeys,) def _cols(self, mapper): cols = [] metadata = getattr(mapper.local_table, 'metadata', None) for (ckey, tkey) in self.colkeys: if tkey is None or \ metadata is None or \ tkey not in metadata: cols.append(mapper.local_table.c[ckey]) else: cols.append(metadata.tables[tkey].c[ckey]) return cols def column_mapped_collection(mapping_spec): """A dictionary-based collection type with column-based keying. Returns a :class:`.MappedCollection` factory with a keying function generated from mapping_spec, which may be a Column or a sequence of Columns. The key value must be immutable for the lifetime of the object. You can not, for example, map on foreign key values if those key values will change during the session, i.e. from None to a database-assigned integer after a session flush. """ cols = [expression._only_column_elements(q, "mapping_spec") for q in util.to_list(mapping_spec) ] keyfunc = _PlainColumnGetter(cols) return lambda: MappedCollection(keyfunc) class _SerializableAttrGetter(object): def __init__(self, name): self.name = name self.getter = operator.attrgetter(name) def __call__(self, target): return self.getter(target) def __reduce__(self): return _SerializableAttrGetter, (self.name, ) def attribute_mapped_collection(attr_name): """A dictionary-based collection type with attribute-based keying. Returns a :class:`.MappedCollection` factory with a keying based on the 'attr_name' attribute of entities in the collection, where ``attr_name`` is the string name of the attribute. The key value must be immutable for the lifetime of the object. You can not, for example, map on foreign key values if those key values will change during the session, i.e. from None to a database-assigned integer after a session flush. """ getter = _SerializableAttrGetter(attr_name) return lambda: MappedCollection(getter) def mapped_collection(keyfunc): """A dictionary-based collection type with arbitrary keying. Returns a :class:`.MappedCollection` factory with a keying function generated from keyfunc, a callable that takes an entity and returns a key value. The key value must be immutable for the lifetime of the object. You can not, for example, map on foreign key values if those key values will change during the session, i.e. from None to a database-assigned integer after a session flush. """ return lambda: MappedCollection(keyfunc) class collection(object): """Decorators for entity collection classes. The decorators fall into two groups: annotations and interception recipes. The annotating decorators (appender, remover, iterator, linker, converter, internally_instrumented) indicate the method's purpose and take no arguments. They are not written with parens:: @collection.appender def append(self, append): ... The recipe decorators all require parens, even those that take no arguments:: @collection.adds('entity') def insert(self, position, entity): ... @collection.removes_return() def popitem(self): ... """ # Bundled as a class solely for ease of use: packaging, doc strings, # importability. @staticmethod def appender(fn): """Tag the method as the collection appender. 
The appender method is called with one positional argument: the value to append. The method will be automatically decorated with 'adds(1)' if not already decorated:: @collection.appender def add(self, append): ... # or, equivalently @collection.appender @collection.adds(1) def add(self, append): ... # for mapping type, an 'append' may kick out a previous value # that occupies that slot. consider d['a'] = 'foo'- any previous # value in d['a'] is discarded. @collection.appender @collection.replaces(1) def add(self, entity): key = some_key_func(entity) previous = None if key in self: previous = self[key] self[key] = entity return previous If the value to append is not allowed in the collection, you may raise an exception. Something to remember is that the appender will be called for each object mapped by a database query. If the database contains rows that violate your collection semantics, you will need to get creative to fix the problem, as access via the collection will not work. If the appender method is internally instrumented, you must also receive the keyword argument '_sa_initiator' and ensure its promulgation to collection events. """ fn._sa_instrument_role = 'appender' return fn @staticmethod def remover(fn): """Tag the method as the collection remover. The remover method is called with one positional argument: the value to remove. The method will be automatically decorated with :meth:`removes_return` if not already decorated:: @collection.remover def zap(self, entity): ... # or, equivalently @collection.remover @collection.removes_return() def zap(self, ): ... If the value to remove is not present in the collection, you may raise an exception or return None to ignore the error. If the remove method is internally instrumented, you must also receive the keyword argument '_sa_initiator' and ensure its promulgation to collection events. """ fn._sa_instrument_role = 'remover' return fn @staticmethod def iterator(fn): """Tag the method as the collection remover. The iterator method is called with no arguments. It is expected to return an iterator over all collection members:: @collection.iterator def __iter__(self): ... """ fn._sa_instrument_role = 'iterator' return fn @staticmethod def internally_instrumented(fn): """Tag the method as instrumented. This tag will prevent any decoration from being applied to the method. Use this if you are orchestrating your own calls to :func:`.collection_adapter` in one of the basic SQLAlchemy interface methods, or to prevent an automatic ABC method decoration from wrapping your implementation:: # normally an 'extend' method on a list-like class would be # automatically intercepted and re-implemented in terms of # SQLAlchemy events and append(). your implementation will # never be called, unless: @collection.internally_instrumented def extend(self, items): ... """ fn._sa_instrumented = True return fn @staticmethod def linker(fn): """Tag the method as a "linked to attribute" event handler. This optional event handler will be called when the collection class is linked to or unlinked from the InstrumentedAttribute. It is invoked immediately after the '_sa_adapter' property is set on the instance. A single argument is passed: the collection adapter that has been linked, or None if unlinking. .. deprecated:: 1.0.0 - the :meth:`.collection.linker` handler is superseded by the :meth:`.AttributeEvents.init_collection` and :meth:`.AttributeEvents.dispose_collection` handlers. 
""" fn._sa_instrument_role = 'linker' return fn link = linker """deprecated; synonym for :meth:`.collection.linker`.""" @staticmethod def converter(fn): """Tag the method as the collection converter. This optional method will be called when a collection is being replaced entirely, as in:: myobj.acollection = [newvalue1, newvalue2] The converter method will receive the object being assigned and should return an iterable of values suitable for use by the ``appender`` method. A converter must not assign values or mutate the collection, its sole job is to adapt the value the user provides into an iterable of values for the ORM's use. The default converter implementation will use duck-typing to do the conversion. A dict-like collection will be convert into an iterable of dictionary values, and other types will simply be iterated:: @collection.converter def convert(self, other): ... If the duck-typing of the object does not match the type of this collection, a TypeError is raised. Supply an implementation of this method if you want to expand the range of possible types that can be assigned in bulk or perform validation on the values about to be assigned. """ fn._sa_instrument_role = 'converter' return fn @staticmethod def adds(arg): """Mark the method as adding an entity to the collection. Adds "add to collection" handling to the method. The decorator argument indicates which method argument holds the SQLAlchemy-relevant value. Arguments can be specified positionally (i.e. integer) or by name:: @collection.adds(1) def push(self, item): ... @collection.adds('entity') def do_stuff(self, thing, entity=None): ... """ def decorator(fn): fn._sa_instrument_before = ('fire_append_event', arg) return fn return decorator @staticmethod def replaces(arg): """Mark the method as replacing an entity in the collection. Adds "add to collection" and "remove from collection" handling to the method. The decorator argument indicates which method argument holds the SQLAlchemy-relevant value to be added, and return value, if any will be considered the value to remove. Arguments can be specified positionally (i.e. integer) or by name:: @collection.replaces(2) def __setitem__(self, index, item): ... """ def decorator(fn): fn._sa_instrument_before = ('fire_append_event', arg) fn._sa_instrument_after = 'fire_remove_event' return fn return decorator @staticmethod def removes(arg): """Mark the method as removing an entity in the collection. Adds "remove from collection" handling to the method. The decorator argument indicates which method argument holds the SQLAlchemy-relevant value to be removed. Arguments can be specified positionally (i.e. integer) or by name:: @collection.removes(1) def zap(self, item): ... For methods where the value to remove is not known at call-time, use collection.removes_return. """ def decorator(fn): fn._sa_instrument_before = ('fire_remove_event', arg) return fn return decorator @staticmethod def removes_return(): """Mark the method as removing an entity in the collection. Adds "remove from collection" handling to the method. The return value of the method, if any, is considered the value to remove. The method arguments are not inspected:: @collection.removes_return() def pop(self): ... For methods where the value to remove is known at call-time, use collection.remove. 
""" def decorator(fn): fn._sa_instrument_after = 'fire_remove_event' return fn return decorator collection_adapter = operator.attrgetter('_sa_adapter') """Fetch the :class:`.CollectionAdapter` for a collection.""" class CollectionAdapter(object): """Bridges between the ORM and arbitrary Python collections. Proxies base-level collection operations (append, remove, iterate) to the underlying Python collection, and emits add/remove events for entities entering or leaving the collection. The ORM uses :class:`.CollectionAdapter` exclusively for interaction with entity collections. """ __slots__ = ( 'attr', '_key', '_data', 'owner_state', '_converter', 'invalidated') def __init__(self, attr, owner_state, data): self.attr = attr self._key = attr.key self._data = weakref.ref(data) self.owner_state = owner_state data._sa_adapter = self self._converter = data._sa_converter self.invalidated = False def _warn_invalidated(self): util.warn("This collection has been invalidated.") @property def data(self): "The entity collection being adapted." return self._data() @property def _referenced_by_owner(self): """return True if the owner state still refers to this collection. This will return False within a bulk replace operation, where this collection is the one being replaced. """ return self.owner_state.dict[self._key] is self._data() def bulk_appender(self): return self._data()._sa_appender def append_with_event(self, item, initiator=None): """Add an entity to the collection, firing mutation events.""" self._data()._sa_appender(item, _sa_initiator=initiator) def append_without_event(self, item): """Add or restore an entity to the collection, firing no events.""" self._data()._sa_appender(item, _sa_initiator=False) def append_multiple_without_event(self, items): """Add or restore an entity to the collection, firing no events.""" appender = self._data()._sa_appender for item in items: appender(item, _sa_initiator=False) def bulk_remover(self): return self._data()._sa_remover def remove_with_event(self, item, initiator=None): """Remove an entity from the collection, firing mutation events.""" self._data()._sa_remover(item, _sa_initiator=initiator) def remove_without_event(self, item): """Remove an entity from the collection, firing no events.""" self._data()._sa_remover(item, _sa_initiator=False) def clear_with_event(self, initiator=None): """Empty the collection, firing a mutation event for each entity.""" remover = self._data()._sa_remover for item in list(self): remover(item, _sa_initiator=initiator) def clear_without_event(self): """Empty the collection, firing no events.""" remover = self._data()._sa_remover for item in list(self): remover(item, _sa_initiator=False) def __iter__(self): """Iterate over entities in the collection.""" return iter(self._data()._sa_iterator()) def __len__(self): """Count entities in the collection.""" return len(list(self._data()._sa_iterator())) def __bool__(self): return True __nonzero__ = __bool__ def fire_append_event(self, item, initiator=None): """Notify that a entity has entered the collection. Initiator is a token owned by the InstrumentedAttribute that initiated the membership mutation, and should be left as None unless you are passing along an initiator value from a chained operation. 
""" if initiator is not False: if self.invalidated: self._warn_invalidated() return self.attr.fire_append_event( self.owner_state, self.owner_state.dict, item, initiator) else: return item def fire_remove_event(self, item, initiator=None): """Notify that a entity has been removed from the collection. Initiator is the InstrumentedAttribute that initiated the membership mutation, and should be left as None unless you are passing along an initiator value from a chained operation. """ if initiator is not False: if self.invalidated: self._warn_invalidated() self.attr.fire_remove_event( self.owner_state, self.owner_state.dict, item, initiator) def fire_pre_remove_event(self, initiator=None): """Notify that an entity is about to be removed from the collection. Only called if the entity cannot be removed after calling fire_remove_event(). """ if self.invalidated: self._warn_invalidated() self.attr.fire_pre_remove_event( self.owner_state, self.owner_state.dict, initiator=initiator) def __getstate__(self): return {'key': self._key, 'owner_state': self.owner_state, 'owner_cls': self.owner_state.class_, 'data': self.data, 'invalidated': self.invalidated} def __setstate__(self, d): self._key = d['key'] self.owner_state = d['owner_state'] self._data = weakref.ref(d['data']) self._converter = d['data']._sa_converter d['data']._sa_adapter = self self.invalidated = d['invalidated'] self.attr = getattr(d['owner_cls'], self._key).impl def bulk_replace(values, existing_adapter, new_adapter): """Load a new collection, firing events based on prior like membership. Appends instances in ``values`` onto the ``new_adapter``. Events will be fired for any instance not present in the ``existing_adapter``. Any instances in ``existing_adapter`` not present in ``values`` will have remove events fired upon them. :param values: An iterable of collection member instances :param existing_adapter: A :class:`.CollectionAdapter` of instances to be replaced :param new_adapter: An empty :class:`.CollectionAdapter` to load with ``values`` """ assert isinstance(values, list) idset = util.IdentitySet existing_idset = idset(existing_adapter or ()) constants = existing_idset.intersection(values or ()) additions = idset(values or ()).difference(constants) removals = existing_idset.difference(constants) appender = new_adapter.bulk_appender() for member in values or (): if member in additions: appender(member) elif member in constants: appender(member, _sa_initiator=False) if existing_adapter: remover = existing_adapter.bulk_remover() for member in removals: remover(member) def prepare_instrumentation(factory): """Prepare a callable for future use as a collection class factory. Given a collection class factory (either a type or no-arg callable), return another factory that will produce compatible instances when called. This function is responsible for converting collection_class=list into the run-time behavior of collection_class=InstrumentedList. """ # Convert a builtin to 'Instrumented*' if factory in __canned_instrumentation: factory = __canned_instrumentation[factory] # Create a specimen cls = type(factory()) # Did factory callable return a builtin? if cls in __canned_instrumentation: # Wrap it so that it returns our 'Instrumented*' factory = __converting_factory(cls, factory) cls = factory() # Instrument the class if needed. 
if __instrumentation_mutex.acquire(): try: if getattr(cls, '_sa_instrumented', None) != id(cls): _instrument_class(cls) finally: __instrumentation_mutex.release() return factory def __converting_factory(specimen_cls, original_factory): """Return a wrapper that converts a "canned" collection like set, dict, list into the Instrumented* version. """ instrumented_cls = __canned_instrumentation[specimen_cls] def wrapper(): collection = original_factory() return instrumented_cls(collection) # often flawed but better than nothing wrapper.__name__ = "%sWrapper" % original_factory.__name__ wrapper.__doc__ = original_factory.__doc__ return wrapper def _instrument_class(cls): """Modify methods in a class and install instrumentation.""" # In the normal call flow, a request for any of the 3 basic collection # types is transformed into one of our trivial subclasses # (e.g. InstrumentedList). Catch anything else that sneaks in here... if cls.__module__ == '__builtin__': raise sa_exc.ArgumentError( "Can not instrument a built-in type. Use a " "subclass, even a trivial one.") roles, methods = _locate_roles_and_methods(cls) _setup_canned_roles(cls, roles, methods) _assert_required_roles(cls, roles, methods) _set_collection_attributes(cls, roles, methods) def _locate_roles_and_methods(cls): """search for _sa_instrument_role-decorated methods in method resolution order, assign to roles. """ roles = {} methods = {} for supercls in cls.__mro__: for name, method in vars(supercls).items(): if not util.callable(method): continue # note role declarations if hasattr(method, '_sa_instrument_role'): role = method._sa_instrument_role assert role in ('appender', 'remover', 'iterator', 'linker', 'converter') roles.setdefault(role, name) # transfer instrumentation requests from decorated function # to the combined queue before, after = None, None if hasattr(method, '_sa_instrument_before'): op, argument = method._sa_instrument_before assert op in ('fire_append_event', 'fire_remove_event') before = op, argument if hasattr(method, '_sa_instrument_after'): op = method._sa_instrument_after assert op in ('fire_append_event', 'fire_remove_event') after = op if before: methods[name] = before + (after, ) elif after: methods[name] = None, None, after return roles, methods def _setup_canned_roles(cls, roles, methods): """see if this class has "canned" roles based on a known collection type (dict, set, list). 
Apply those roles as needed to the "roles" dictionary, and also prepare "decorator" methods """ collection_type = util.duck_type_collection(cls) if collection_type in __interfaces: canned_roles, decorators = __interfaces[collection_type] for role, name in canned_roles.items(): roles.setdefault(role, name) # apply ABC auto-decoration to methods that need it for method, decorator in decorators.items(): fn = getattr(cls, method, None) if (fn and method not in methods and not hasattr(fn, '_sa_instrumented')): setattr(cls, method, decorator(fn)) def _assert_required_roles(cls, roles, methods): """ensure all roles are present, and apply implicit instrumentation if needed """ if 'appender' not in roles or not hasattr(cls, roles['appender']): raise sa_exc.ArgumentError( "Type %s must elect an appender method to be " "a collection class" % cls.__name__) elif (roles['appender'] not in methods and not hasattr(getattr(cls, roles['appender']), '_sa_instrumented')): methods[roles['appender']] = ('fire_append_event', 1, None) if 'remover' not in roles or not hasattr(cls, roles['remover']): raise sa_exc.ArgumentError( "Type %s must elect a remover method to be " "a collection class" % cls.__name__) elif (roles['remover'] not in methods and not hasattr(getattr(cls, roles['remover']), '_sa_instrumented')): methods[roles['remover']] = ('fire_remove_event', 1, None) if 'iterator' not in roles or not hasattr(cls, roles['iterator']): raise sa_exc.ArgumentError( "Type %s must elect an iterator method to be " "a collection class" % cls.__name__) def _set_collection_attributes(cls, roles, methods): """apply ad-hoc instrumentation from decorators, class-level defaults and implicit role declarations """ for method_name, (before, argument, after) in methods.items(): setattr(cls, method_name, _instrument_membership_mutator(getattr(cls, method_name), before, argument, after)) # intern the role map for role, method_name in roles.items(): setattr(cls, '_sa_%s' % role, getattr(cls, method_name)) cls._sa_adapter = None if not hasattr(cls, '_sa_converter'): cls._sa_converter = None cls._sa_instrumented = id(cls) def _instrument_membership_mutator(method, before, argument, after): """Route method args and/or return value through the collection adapter.""" # This isn't smart enough to handle @adds(1) for 'def fn(self, (a, b))' if before: fn_args = list(util.flatten_iterator(inspect_getargspec(method)[0])) if isinstance(argument, int): pos_arg = argument named_arg = len(fn_args) > argument and fn_args[argument] or None else: if argument in fn_args: pos_arg = fn_args.index(argument) else: pos_arg = None named_arg = argument del fn_args def wrapper(*args, **kw): if before: if pos_arg is None: if named_arg not in kw: raise sa_exc.ArgumentError( "Missing argument %s" % argument) value = kw[named_arg] else: if len(args) > pos_arg: value = args[pos_arg] elif named_arg in kw: value = kw[named_arg] else: raise sa_exc.ArgumentError( "Missing argument %s" % argument) initiator = kw.pop('_sa_initiator', None) if initiator is False: executor = None else: executor = args[0]._sa_adapter if before and executor: getattr(executor, before)(value, initiator) if not after or not executor: return method(*args, **kw) else: res = method(*args, **kw) if res is not None: getattr(executor, after)(res, initiator) return res wrapper._sa_instrumented = True if hasattr(method, "_sa_instrument_role"): wrapper._sa_instrument_role = method._sa_instrument_role wrapper.__name__ = method.__name__ wrapper.__doc__ = method.__doc__ return wrapper def 
__set(collection, item, _sa_initiator=None): """Run set events, may eventually be inlined into decorators.""" if _sa_initiator is not False: executor = collection._sa_adapter if executor: item = executor.fire_append_event(item, _sa_initiator) return item def __del(collection, item, _sa_initiator=None): """Run del events, may eventually be inlined into decorators.""" if _sa_initiator is not False: executor = collection._sa_adapter if executor: executor.fire_remove_event(item, _sa_initiator) def __before_delete(collection, _sa_initiator=None): """Special method to run 'commit existing value' methods""" executor = collection._sa_adapter if executor: executor.fire_pre_remove_event(_sa_initiator) def _list_decorators(): """Tailored instrumentation wrappers for any list-like class.""" def _tidy(fn): fn._sa_instrumented = True fn.__doc__ = getattr(list, fn.__name__).__doc__ def append(fn): def append(self, item, _sa_initiator=None): item = __set(self, item, _sa_initiator) fn(self, item) _tidy(append) return append def remove(fn): def remove(self, value, _sa_initiator=None): __before_delete(self, _sa_initiator) # testlib.pragma exempt:__eq__ fn(self, value) __del(self, value, _sa_initiator) _tidy(remove) return remove def insert(fn): def insert(self, index, value): value = __set(self, value) fn(self, index, value) _tidy(insert) return insert def __setitem__(fn): def __setitem__(self, index, value): if not isinstance(index, slice): existing = self[index] if existing is not None: __del(self, existing) value = __set(self, value) fn(self, index, value) else: # slice assignment requires __delitem__, insert, __len__ step = index.step or 1 start = index.start or 0 if start < 0: start += len(self) if index.stop is not None: stop = index.stop else: stop = len(self) if stop < 0: stop += len(self) if step == 1: for i in range(start, stop, step): if len(self) > start: del self[start] for i, item in enumerate(value): self.insert(i + start, item) else: rng = list(range(start, stop, step)) if len(value) != len(rng): raise ValueError( "attempt to assign sequence of size %s to " "extended slice of size %s" % (len(value), len(rng))) for i, item in zip(rng, value): self.__setitem__(i, item) _tidy(__setitem__) return __setitem__ def __delitem__(fn): def __delitem__(self, index): if not isinstance(index, slice): item = self[index] __del(self, item) fn(self, index) else: # slice deletion requires __getslice__ and a slice-groking # __getitem__ for stepped deletion # note: not breaking this into atomic dels for item in self[index]: __del(self, item) fn(self, index) _tidy(__delitem__) return __delitem__ if util.py2k: def __setslice__(fn): def __setslice__(self, start, end, values): for value in self[start:end]: __del(self, value) values = [__set(self, value) for value in values] fn(self, start, end, values) _tidy(__setslice__) return __setslice__ def __delslice__(fn): def __delslice__(self, start, end): for value in self[start:end]: __del(self, value) fn(self, start, end) _tidy(__delslice__) return __delslice__ def extend(fn): def extend(self, iterable): for value in iterable: self.append(value) _tidy(extend) return extend def __iadd__(fn): def __iadd__(self, iterable): # list.__iadd__ takes any iterable and seems to let TypeError # raise as-is instead of returning NotImplemented for value in iterable: self.append(value) return self _tidy(__iadd__) return __iadd__ def pop(fn): def pop(self, index=-1): __before_delete(self) item = fn(self, index) __del(self, item) return item _tidy(pop) return pop if not util.py2k: def 
clear(fn): def clear(self, index=-1): for item in self: __del(self, item) fn(self) _tidy(clear) return clear # __imul__ : not wrapping this. all members of the collection are already # present, so no need to fire appends... wrapping it with an explicit # decorator is still possible, so events on *= can be had if they're # desired. hard to imagine a use case for __imul__, though. l = locals().copy() l.pop('_tidy') return l def _dict_decorators(): """Tailored instrumentation wrappers for any dict-like mapping class.""" def _tidy(fn): fn._sa_instrumented = True fn.__doc__ = getattr(dict, fn.__name__).__doc__ Unspecified = util.symbol('Unspecified') def __setitem__(fn): def __setitem__(self, key, value, _sa_initiator=None): if key in self: __del(self, self[key], _sa_initiator) value = __set(self, value, _sa_initiator) fn(self, key, value) _tidy(__setitem__) return __setitem__ def __delitem__(fn): def __delitem__(self, key, _sa_initiator=None): if key in self: __del(self, self[key], _sa_initiator) fn(self, key) _tidy(__delitem__) return __delitem__ def clear(fn): def clear(self): for key in self: __del(self, self[key]) fn(self) _tidy(clear) return clear def pop(fn): def pop(self, key, default=Unspecified): if key in self: __del(self, self[key]) if default is Unspecified: return fn(self, key) else: return fn(self, key, default) _tidy(pop) return pop def popitem(fn): def popitem(self): __before_delete(self) item = fn(self) __del(self, item[1]) return item _tidy(popitem) return popitem def setdefault(fn): def setdefault(self, key, default=None): if key not in self: self.__setitem__(key, default) return default else: return self.__getitem__(key) _tidy(setdefault) return setdefault def update(fn): def update(self, __other=Unspecified, **kw): if __other is not Unspecified: if hasattr(__other, 'keys'): for key in list(__other): if (key not in self or self[key] is not __other[key]): self[key] = __other[key] else: for key, value in __other: if key not in self or self[key] is not value: self[key] = value for key in kw: if key not in self or self[key] is not kw[key]: self[key] = kw[key] _tidy(update) return update l = locals().copy() l.pop('_tidy') l.pop('Unspecified') return l _set_binop_bases = (set, frozenset) def _set_binops_check_strict(self, obj): """Allow only set, frozenset and self.__class__-derived objects in binops.""" return isinstance(obj, _set_binop_bases + (self.__class__,)) def _set_binops_check_loose(self, obj): """Allow anything set-like to participate in set binops.""" return (isinstance(obj, _set_binop_bases + (self.__class__,)) or util.duck_type_collection(obj) == set) def _set_decorators(): """Tailored instrumentation wrappers for any set-like class.""" def _tidy(fn): fn._sa_instrumented = True fn.__doc__ = getattr(set, fn.__name__).__doc__ Unspecified = util.symbol('Unspecified') def add(fn): def add(self, value, _sa_initiator=None): if value not in self: value = __set(self, value, _sa_initiator) # testlib.pragma exempt:__hash__ fn(self, value) _tidy(add) return add def discard(fn): def discard(self, value, _sa_initiator=None): # testlib.pragma exempt:__hash__ if value in self: __del(self, value, _sa_initiator) # testlib.pragma exempt:__hash__ fn(self, value) _tidy(discard) return discard def remove(fn): def remove(self, value, _sa_initiator=None): # testlib.pragma exempt:__hash__ if value in self: __del(self, value, _sa_initiator) # testlib.pragma exempt:__hash__ fn(self, value) _tidy(remove) return remove def pop(fn): def pop(self): __before_delete(self) item = fn(self) 
__del(self, item) return item _tidy(pop) return pop def clear(fn): def clear(self): for item in list(self): self.remove(item) _tidy(clear) return clear def update(fn): def update(self, value): for item in value: self.add(item) _tidy(update) return update def __ior__(fn): def __ior__(self, value): if not _set_binops_check_strict(self, value): return NotImplemented for item in value: self.add(item) return self _tidy(__ior__) return __ior__ def difference_update(fn): def difference_update(self, value): for item in value: self.discard(item) _tidy(difference_update) return difference_update def __isub__(fn): def __isub__(self, value): if not _set_binops_check_strict(self, value): return NotImplemented for item in value: self.discard(item) return self _tidy(__isub__) return __isub__ def intersection_update(fn): def intersection_update(self, other): want, have = self.intersection(other), set(self) remove, add = have - want, want - have for item in remove: self.remove(item) for item in add: self.add(item) _tidy(intersection_update) return intersection_update def __iand__(fn): def __iand__(self, other): if not _set_binops_check_strict(self, other): return NotImplemented want, have = self.intersection(other), set(self) remove, add = have - want, want - have for item in remove: self.remove(item) for item in add: self.add(item) return self _tidy(__iand__) return __iand__ def symmetric_difference_update(fn): def symmetric_difference_update(self, other): want, have = self.symmetric_difference(other), set(self) remove, add = have - want, want - have for item in remove: self.remove(item) for item in add: self.add(item) _tidy(symmetric_difference_update) return symmetric_difference_update def __ixor__(fn): def __ixor__(self, other): if not _set_binops_check_strict(self, other): return NotImplemented want, have = self.symmetric_difference(other), set(self) remove, add = have - want, want - have for item in remove: self.remove(item) for item in add: self.add(item) return self _tidy(__ixor__) return __ixor__ l = locals().copy() l.pop('_tidy') l.pop('Unspecified') return l class InstrumentedList(list): """An instrumented version of the built-in list.""" class InstrumentedSet(set): """An instrumented version of the built-in set.""" class InstrumentedDict(dict): """An instrumented version of the built-in dict.""" __canned_instrumentation = { list: InstrumentedList, set: InstrumentedSet, dict: InstrumentedDict, } __interfaces = { list: ( {'appender': 'append', 'remover': 'remove', 'iterator': '__iter__'}, _list_decorators() ), set: ({'appender': 'add', 'remover': 'remove', 'iterator': '__iter__'}, _set_decorators() ), # decorators are required for dicts and object collections. dict: ({'iterator': 'values'}, _dict_decorators()) if util.py3k else ({'iterator': 'itervalues'}, _dict_decorators()), } class MappedCollection(dict): """A basic dictionary-based collection class. Extends dict with the minimal bag semantics that collection classes require. ``set`` and ``remove`` are implemented in terms of a keying function: any callable that takes an object and returns an object for use as a dictionary key. """ def __init__(self, keyfunc): """Create a new collection with keying provided by keyfunc. keyfunc may be any callable that takes an object and returns an object for use as a dictionary key. The keyfunc will be called every time the ORM needs to add a member by value-only (such as when loading instances from the database) or remove a member. 
The usual cautions about dictionary keying apply- ``keyfunc(object)`` should return the same output for the life of the collection. Keying based on mutable properties can result in unreachable instances "lost" in the collection. """ self.keyfunc = keyfunc @collection.appender @collection.internally_instrumented def set(self, value, _sa_initiator=None): """Add an item by value, consulting the keyfunc for the key.""" key = self.keyfunc(value) self.__setitem__(key, value, _sa_initiator) @collection.remover @collection.internally_instrumented def remove(self, value, _sa_initiator=None): """Remove an item by value, consulting the keyfunc for the key.""" key = self.keyfunc(value) # Let self[key] raise if key is not in this collection # testlib.pragma exempt:__ne__ if self[key] != value: raise sa_exc.InvalidRequestError( "Can not remove '%s': collection holds '%s' for key '%s'. " "Possible cause: is the MappedCollection key function " "based on mutable properties or properties that only obtain " "values after flush?" % (value, self[key], key)) self.__delitem__(key, _sa_initiator) @collection.converter def _convert(self, dictlike): """Validate and convert a dict-like object into values for set()ing. This is called behind the scenes when a MappedCollection is replaced entirely by another collection, as in:: myobj.mappedcollection = {'a':obj1, 'b': obj2} # ... Raises a TypeError if the key in any (key, value) pair in the dictlike object does not match the key that this collection's keyfunc would have assigned for that value. """ for incoming_key, value in util.dictlike_iteritems(dictlike): new_key = self.keyfunc(value) if incoming_key != new_key: raise TypeError( "Found incompatible key %r for value %r; this " "collection's " "keying function requires a key of %r for this value." % ( incoming_key, value, new_key)) yield value # ensure instrumentation is associated with # these built-in classes; if a user-defined class # subclasses these and uses @internally_instrumented, # the superclass is otherwise not instrumented. # see [ticket:2406]. _instrument_class(MappedCollection) _instrument_class(InstrumentedList) _instrument_class(InstrumentedSet)
mit
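A hedged sketch of the two usage styles the docstrings above describe -- a custom collection class electing its appender/remover/iterator roles, and the dictionary-keying helper -- against illustrative models that are not part of the module.

    from sqlalchemy import Column, ForeignKey, Integer, String
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import relationship
    from sqlalchemy.orm.collections import attribute_mapped_collection, collection

    Base = declarative_base()

    class ListLikeBag(object):
        """Arbitrary container instrumented via the role decorators."""
        def __init__(self):
            self.data = []

        @collection.appender
        def append(self, item):
            self.data.append(item)

        @collection.remover
        def remove(self, item):
            self.data.remove(item)

        @collection.iterator
        def __iter__(self):
            return iter(self.data)

    class Parent(Base):
        __tablename__ = 'parent'
        id = Column(Integer, primary_key=True)
        # a custom collection class...
        children = relationship('Child', collection_class=ListLikeBag)
        # ...or dict-style keying on an attribute of the child objects
        notes = relationship(
            'Note', collection_class=attribute_mapped_collection('keyword'))

    class Child(Base):
        __tablename__ = 'child'
        id = Column(Integer, primary_key=True)
        parent_id = Column(Integer, ForeignKey('parent.id'))

    class Note(Base):
        __tablename__ = 'note'
        id = Column(Integer, primary_key=True)
        parent_id = Column(Integer, ForeignKey('parent.id'))
        keyword = Column(String(50))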
jd/hyde
hyde/tests/test_initialize.py
6
3678
# -*- coding: utf-8 -*- """ Use nose `$ pip install nose` `$ nosetests` """ from hyde.engine import Engine from hyde.exceptions import HydeException from hyde.layout import Layout from fswrap import File, Folder from nose.tools import raises, with_setup, nottest TEST_SITE = File(__file__).parent.child_folder('_test') TEST_SITE_AT_USER = Folder('~/_test') @nottest def create_test_site(): TEST_SITE.make() @nottest def delete_test_site(): TEST_SITE.delete() @nottest def create_test_site_at_user(): TEST_SITE_AT_USER.make() @nottest def delete_test_site_at_user(): TEST_SITE_AT_USER.delete() @raises(HydeException) @with_setup(create_test_site, delete_test_site) def test_ensure_exception_when_site_yaml_exists(): e = Engine(raise_exceptions=True) File(TEST_SITE.child('site.yaml')).write("Hey") e.run(e.parse(['-s', unicode(TEST_SITE), 'create'])) @raises(HydeException) @with_setup(create_test_site, delete_test_site) def test_ensure_exception_when_content_folder_exists(): e = Engine(raise_exceptions=True) TEST_SITE.child_folder('content').make() e.run(e.parse(['-s', unicode(TEST_SITE), 'create'])) @raises(HydeException) @with_setup(create_test_site, delete_test_site) def test_ensure_exception_when_layout_folder_exists(): e = Engine(raise_exceptions=True) TEST_SITE.child_folder('layout').make() e.run(e.parse(['-s', unicode(TEST_SITE), 'create'])) @with_setup(create_test_site, delete_test_site) def test_ensure_no_exception_when_empty_site_exists(): e = Engine(raise_exceptions=True) e.run(e.parse(['-s', unicode(TEST_SITE), 'create'])) verify_site_contents(TEST_SITE, Layout.find_layout()) @with_setup(create_test_site, delete_test_site) def test_ensure_no_exception_when_forced(): e = Engine(raise_exceptions=True) TEST_SITE.child_folder('layout').make() e.run(e.parse(['-s', unicode(TEST_SITE), 'create', '-f'])) verify_site_contents(TEST_SITE, Layout.find_layout()) TEST_SITE.delete() TEST_SITE.child_folder('content').make() e.run(e.parse(['-s', unicode(TEST_SITE), 'create', '-f'])) verify_site_contents(TEST_SITE, Layout.find_layout()) TEST_SITE.delete() TEST_SITE.make() File(TEST_SITE.child('site.yaml')).write("Hey") e.run(e.parse(['-s', unicode(TEST_SITE), 'create', '-f'])) verify_site_contents(TEST_SITE, Layout.find_layout()) @with_setup(create_test_site, delete_test_site) def test_ensure_no_exception_when_sitepath_does_not_exist(): e = Engine(raise_exceptions=True) TEST_SITE.delete() e.run(e.parse(['-s', unicode(TEST_SITE), 'create', '-f'])) verify_site_contents(TEST_SITE, Layout.find_layout()) @with_setup(create_test_site_at_user, delete_test_site_at_user) def test_ensure_can_create_site_at_user(): e = Engine(raise_exceptions=True) TEST_SITE_AT_USER.delete() e.run(e.parse(['-s', unicode(TEST_SITE_AT_USER), 'create', '-f'])) verify_site_contents(TEST_SITE_AT_USER, Layout.find_layout()) @nottest def verify_site_contents(site, layout): assert site.exists assert site.child_folder('layout').exists assert File(site.child('info.yaml')).exists expected = map( lambda f: f.get_relative_path(layout), layout.walker.walk_all()) actual = map(lambda f: f.get_relative_path(site), site.walker.walk_all()) assert actual assert expected expected.sort() actual.sort() assert actual == expected @raises(HydeException) @with_setup(create_test_site, delete_test_site) def test_ensure_exception_when_layout_is_invalid(): e = Engine(raise_exceptions=True) e.run(e.parse(['-s', unicode(TEST_SITE), 'create', '-l', 'junk']))
mit
kg-bot/SupyBot
plugins/IMDb/__init__.py
1
1149
### # Copyright (c) 2012, Dan # All rights reserved. # # ### """ Add a description of the plugin (to be presented to the user inside the wizard) here. This should describe *what* the plugin does. """ import supybot import supybot.world as world # Use this for the version of this plugin. You may wish to put a CVS keyword # in here if you're keeping the plugin in CVS or some similar system. __version__ = "" # XXX Replace this with an appropriate author or supybot.Author instance. __author__ = supybot.authors.unknown # This is a dictionary mapping supybot.Author instances to lists of # contributions. __contributors__ = {} # This is a url where the most recent plugin package can be downloaded. __url__ = '' # 'http://supybot.com/Members/yourname/IMDb/download' import config import plugin reload(plugin) # In case we're being reloaded. # Add more reloads here if you add third-party modules and want them to be # reloaded when this plugin is reloaded. Don't forget to import them as well! if world.testing: import test Class = plugin.Class configure = config.configure # vim:set shiftwidth=4 tabstop=4 expandtab textwidth=79:
gpl-3.0
ulope/django
tests/get_or_create/tests.py
19
13380
from __future__ import unicode_literals from datetime import date import traceback import warnings from django.db import IntegrityError, DatabaseError from django.utils.encoding import DjangoUnicodeDecodeError from django.test import TestCase, TransactionTestCase from .models import (DefaultPerson, Person, ManualPrimaryKeyTest, Profile, Tag, Thing, Publisher, Author, Book) class GetOrCreateTests(TestCase): def setUp(self): self.lennon = Person.objects.create( first_name='John', last_name='Lennon', birthday=date(1940, 10, 9) ) def test_get_or_create_method_with_get(self): created = Person.objects.get_or_create( first_name="John", last_name="Lennon", defaults={ "birthday": date(1940, 10, 9) } )[1] self.assertFalse(created) self.assertEqual(Person.objects.count(), 1) def test_get_or_create_method_with_create(self): created = Person.objects.get_or_create( first_name='George', last_name='Harrison', defaults={ 'birthday': date(1943, 2, 25) } )[1] self.assertTrue(created) self.assertEqual(Person.objects.count(), 2) def test_get_or_create_redundant_instance(self): """ If we execute the exact same statement twice, the second time, it won't create a Person. """ Person.objects.get_or_create( first_name='George', last_name='Harrison', defaults={ 'birthday': date(1943, 2, 25) } ) created = Person.objects.get_or_create( first_name='George', last_name='Harrison', defaults={ 'birthday': date(1943, 2, 25) } )[1] self.assertFalse(created) self.assertEqual(Person.objects.count(), 2) def test_get_or_create_invalid_params(self): """ If you don't specify a value or default value for all required fields, you will get an error. """ self.assertRaises( IntegrityError, Person.objects.get_or_create, first_name="Tom", last_name="Smith" ) def test_get_or_create_on_related_manager(self): p = Publisher.objects.create(name="Acme Publishing") # Create a book through the publisher. book, created = p.books.get_or_create(name="The Book of Ed & Fred") self.assertTrue(created) # The publisher should have one book. self.assertEqual(p.books.count(), 1) # Try get_or_create again, this time nothing should be created. book, created = p.books.get_or_create(name="The Book of Ed & Fred") self.assertFalse(created) # And the publisher should still have one book. self.assertEqual(p.books.count(), 1) # Add an author to the book. ed, created = book.authors.get_or_create(name="Ed") self.assertTrue(created) # The book should have one author. self.assertEqual(book.authors.count(), 1) # Try get_or_create again, this time nothing should be created. ed, created = book.authors.get_or_create(name="Ed") self.assertFalse(created) # And the book should still have one author. self.assertEqual(book.authors.count(), 1) # Add a second author to the book. fred, created = book.authors.get_or_create(name="Fred") self.assertTrue(created) # The book should have two authors now. self.assertEqual(book.authors.count(), 2) # Create an Author not tied to any books. Author.objects.create(name="Ted") # There should be three Authors in total. The book object should have two. self.assertEqual(Author.objects.count(), 3) self.assertEqual(book.authors.count(), 2) # Try creating a book through an author. _, created = ed.books.get_or_create(name="Ed's Recipes", publisher=p) self.assertTrue(created) # Now Ed has two Books, Fred just one. self.assertEqual(ed.books.count(), 2) self.assertEqual(fred.books.count(), 1) # Use the publisher's primary key value instead of a model instance. 
_, created = ed.books.get_or_create(name='The Great Book of Ed', publisher_id=p.id) self.assertTrue(created) # Try get_or_create again, this time nothing should be created. _, created = ed.books.get_or_create(name='The Great Book of Ed', publisher_id=p.id) self.assertFalse(created) # The publisher should have three books. self.assertEqual(p.books.count(), 3) class GetOrCreateTestsWithManualPKs(TestCase): def setUp(self): self.first_pk = ManualPrimaryKeyTest.objects.create(id=1, data="Original") def test_create_with_duplicate_primary_key(self): """ If you specify an existing primary key, but different other fields, then you will get an error and data will not be updated. """ self.assertRaises( IntegrityError, ManualPrimaryKeyTest.objects.get_or_create, id=1, data="Different" ) self.assertEqual(ManualPrimaryKeyTest.objects.get(id=1).data, "Original") def test_get_or_create_raises_IntegrityError_plus_traceback(self): """ get_or_create should raise IntegrityErrors with the full traceback. This is tested by checking that a known method call is in the traceback. We cannot use assertRaises here because we need to inspect the actual traceback. Refs #16340. """ try: ManualPrimaryKeyTest.objects.get_or_create(id=1, data="Different") except IntegrityError: formatted_traceback = traceback.format_exc() self.assertIn(str('obj.save'), formatted_traceback) def test_savepoint_rollback(self): """ Regression test for #20463: the database connection should still be usable after a DataError or ProgrammingError in .get_or_create(). """ try: # Hide warnings when broken data is saved with a warning (MySQL). with warnings.catch_warnings(): warnings.simplefilter('ignore') Person.objects.get_or_create( birthday=date(1970, 1, 1), defaults={'first_name': b"\xff", 'last_name': b"\xff"}) except (DatabaseError, DjangoUnicodeDecodeError): Person.objects.create( first_name="Bob", last_name="Ross", birthday=date(1950, 1, 1)) else: self.skipTest("This backend accepts broken utf-8.") def test_get_or_create_empty(self): """ Regression test for #16137: get_or_create does not require kwargs. """ try: DefaultPerson.objects.get_or_create() except AssertionError: self.fail("If all the attributes on a model have defaults, we " "shouldn't need to pass any arguments.") class GetOrCreateTransactionTests(TransactionTestCase): available_apps = ['get_or_create'] def test_get_or_create_integrityerror(self): """ Regression test for #15117. Requires a TransactionTestCase on databases that delay integrity checks until the end of transactions, otherwise the exception is never raised. 
""" try: Profile.objects.get_or_create(person=Person(id=1)) except IntegrityError: pass else: self.skipTest("This backend does not support integrity checks.") class GetOrCreateThroughManyToMany(TestCase): def test_get_get_or_create(self): tag = Tag.objects.create(text='foo') a_thing = Thing.objects.create(name='a') a_thing.tags.add(tag) obj, created = a_thing.tags.get_or_create(text='foo') self.assertFalse(created) self.assertEqual(obj.pk, tag.pk) def test_create_get_or_create(self): a_thing = Thing.objects.create(name='a') obj, created = a_thing.tags.get_or_create(text='foo') self.assertTrue(created) self.assertEqual(obj.text, 'foo') self.assertIn(obj, a_thing.tags.all()) def test_something(self): Tag.objects.create(text='foo') a_thing = Thing.objects.create(name='a') self.assertRaises(IntegrityError, a_thing.tags.get_or_create, text='foo') class UpdateOrCreateTests(TestCase): def test_update(self): Person.objects.create( first_name='John', last_name='Lennon', birthday=date(1940, 10, 9) ) p, created = Person.objects.update_or_create( first_name='John', last_name='Lennon', defaults={ 'birthday': date(1940, 10, 10) } ) self.assertFalse(created) self.assertEqual(p.first_name, 'John') self.assertEqual(p.last_name, 'Lennon') self.assertEqual(p.birthday, date(1940, 10, 10)) def test_create(self): p, created = Person.objects.update_or_create( first_name='John', last_name='Lennon', defaults={ 'birthday': date(1940, 10, 10) } ) self.assertTrue(created) self.assertEqual(p.first_name, 'John') self.assertEqual(p.last_name, 'Lennon') self.assertEqual(p.birthday, date(1940, 10, 10)) def test_create_twice(self): params = { 'first_name': 'John', 'last_name': 'Lennon', 'birthday': date(1940, 10, 10), } Person.objects.update_or_create(**params) # If we execute the exact same statement, it won't create a Person. p, created = Person.objects.update_or_create(**params) self.assertFalse(created) def test_integrity(self): """ If you don't specify a value or default value for all required fields, you will get an error. """ self.assertRaises(IntegrityError, Person.objects.update_or_create, first_name="Tom", last_name="Smith") def test_manual_primary_key_test(self): """ If you specify an existing primary key, but different other fields, then you will get an error and data will not be updated. """ ManualPrimaryKeyTest.objects.create(id=1, data="Original") self.assertRaises( IntegrityError, ManualPrimaryKeyTest.objects.update_or_create, id=1, data="Different" ) self.assertEqual(ManualPrimaryKeyTest.objects.get(id=1).data, "Original") def test_error_contains_full_traceback(self): """ update_or_create should raise IntegrityErrors with the full traceback. This is tested by checking that a known method call is in the traceback. We cannot use assertRaises/assertRaises here because we need to inspect the actual traceback. Refs #16340. """ try: ManualPrimaryKeyTest.objects.update_or_create(id=1, data="Different") except IntegrityError: formatted_traceback = traceback.format_exc() self.assertIn('obj.save', formatted_traceback) def test_create_with_related_manager(self): """ Should be able to use update_or_create from the related manager to create a book. Refs #23611. """ p = Publisher.objects.create(name="Acme Publishing") book, created = p.books.update_or_create(name="The Book of Ed & Fred") self.assertTrue(created) self.assertEqual(p.books.count(), 1) def test_update_with_related_manager(self): """ Should be able to use update_or_create from the related manager to update a book. Refs #23611. 
""" p = Publisher.objects.create(name="Acme Publishing") book = Book.objects.create(name="The Book of Ed & Fred", publisher=p) self.assertEqual(p.books.count(), 1) name = "The Book of Django" book, created = p.books.update_or_create(defaults={'name': name}, id=book.id) self.assertFalse(created) self.assertEqual(book.name, name) self.assertEqual(p.books.count(), 1) def test_create_with_many(self): """ Should be able to use update_or_create from the m2m related manager to create a book. Refs #23611. """ p = Publisher.objects.create(name="Acme Publishing") author = Author.objects.create(name="Ted") book, created = author.books.update_or_create(name="The Book of Ed & Fred", publisher=p) self.assertTrue(created) self.assertEqual(author.books.count(), 1) def test_update_with_many(self): """ Should be able to use update_or_create from the m2m related manager to update a book. Refs #23611. """ p = Publisher.objects.create(name="Acme Publishing") author = Author.objects.create(name="Ted") book = Book.objects.create(name="The Book of Ed & Fred", publisher=p) book.authors.add(author) self.assertEqual(author.books.count(), 1) name = "The Book of Django" book, created = author.books.update_or_create(defaults={'name': name}, id=book.id) self.assertFalse(created) self.assertEqual(book.name, name) self.assertEqual(author.books.count(), 1)
bsd-3-clause
rjshade/grpc
src/python/grpcio/grpc/framework/foundation/logging_pool.py
19
3126
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

"""A thread pool that logs exceptions raised by tasks executed within it."""

import logging

from concurrent import futures


def _wrap(behavior):
    """Wraps an arbitrary callable behavior in exception-logging."""
    def _wrapping(*args, **kwargs):
        try:
            return behavior(*args, **kwargs)
        except Exception:
            logging.exception(
                'Unexpected exception from %s executed in logging pool!', behavior)
            raise
    return _wrapping


class _LoggingPool(object):
    """An exception-logging futures.ThreadPoolExecutor-compatible thread pool."""

    def __init__(self, backing_pool):
        self._backing_pool = backing_pool

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self._backing_pool.shutdown(wait=True)

    def submit(self, fn, *args, **kwargs):
        return self._backing_pool.submit(_wrap(fn), *args, **kwargs)

    def map(self, func, *iterables, **kwargs):
        return self._backing_pool.map(
            _wrap(func), *iterables, timeout=kwargs.get('timeout', None))

    def shutdown(self, wait=True):
        self._backing_pool.shutdown(wait=wait)


def pool(max_workers):
    """Creates a thread pool that logs exceptions raised by the tasks within it.

    Args:
        max_workers: The maximum number of worker threads to allow the pool.

    Returns:
        A futures.ThreadPoolExecutor-compatible thread pool that logs exceptions
        raised by the tasks executed within it.
    """
    return _LoggingPool(futures.ThreadPoolExecutor(max_workers))
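A minimal usage sketch of the module above (not part of the original file); the import path is inferred from the file's location in the repo, and the failing task is invented for illustration.

# Illustrative sketch -- assumes the package is importable from this path.
from grpc.framework.foundation import logging_pool

def _fail():
    raise ValueError('broken task')

# pool() wraps a futures.ThreadPoolExecutor; _wrap() logs the traceback of any
# exception raised by a submitted task and then re-raises it, so the failure
# is still observable on the returned future.
with logging_pool.pool(4) as executor:
    future = executor.submit(_fail)
    assert isinstance(future.exception(), ValueError)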
bsd-3-clause
Eksmo/calibre
src/calibre/gui2/convert/xpath_wizard_ui.py
1
4063
# -*- coding: utf-8 -*-

# Form implementation generated from reading ui file '/home/gugu/w/calibre/src/calibre/gui2/convert/xpath_wizard.ui'
#
# Created: Thu Jul 19 23:32:30 2012
#      by: PyQt4 UI code generator 4.9.1
#
# WARNING! All changes made in this file will be lost!

from PyQt4 import QtCore, QtGui

try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    _fromUtf8 = lambda s: s


class Ui_Form(object):
    def setupUi(self, Form):
        Form.setObjectName(_fromUtf8("Form"))
        Form.resize(400, 381)
        self.verticalLayout = QtGui.QVBoxLayout(Form)
        self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
        self.label = QtGui.QLabel(Form)
        self.label.setObjectName(_fromUtf8("label"))
        self.verticalLayout.addWidget(self.label)
        self.tag = QtGui.QComboBox(Form)
        self.tag.setEditable(True)
        self.tag.setObjectName(_fromUtf8("tag"))
        self.tag.addItem(_fromUtf8(""))
        self.tag.addItem(_fromUtf8(""))
        self.tag.addItem(_fromUtf8(""))
        self.tag.addItem(_fromUtf8(""))
        self.tag.addItem(_fromUtf8(""))
        self.tag.addItem(_fromUtf8(""))
        self.tag.addItem(_fromUtf8(""))
        self.tag.addItem(_fromUtf8(""))
        self.tag.addItem(_fromUtf8(""))
        self.tag.addItem(_fromUtf8(""))
        self.tag.addItem(_fromUtf8(""))
        self.tag.addItem(_fromUtf8(""))
        self.verticalLayout.addWidget(self.tag)
        self.label_2 = QtGui.QLabel(Form)
        self.label_2.setObjectName(_fromUtf8("label_2"))
        self.verticalLayout.addWidget(self.label_2)
        self.attribute = QtGui.QLineEdit(Form)
        self.attribute.setObjectName(_fromUtf8("attribute"))
        self.verticalLayout.addWidget(self.attribute)
        self.label_3 = QtGui.QLabel(Form)
        self.label_3.setObjectName(_fromUtf8("label_3"))
        self.verticalLayout.addWidget(self.label_3)
        self.label_4 = QtGui.QLabel(Form)
        self.label_4.setObjectName(_fromUtf8("label_4"))
        self.verticalLayout.addWidget(self.label_4)
        self.value = QtGui.QLineEdit(Form)
        self.value.setObjectName(_fromUtf8("value"))
        self.verticalLayout.addWidget(self.value)
        self.label_5 = QtGui.QLabel(Form)
        self.label_5.setWordWrap(True)
        self.label_5.setOpenExternalLinks(True)
        self.label_5.setObjectName(_fromUtf8("label_5"))
        self.verticalLayout.addWidget(self.label_5)
        spacerItem = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
        self.verticalLayout.addItem(spacerItem)
        self.label.setBuddy(self.tag)
        self.label_2.setBuddy(self.attribute)
        self.label_3.setBuddy(self.value)

        self.retranslateUi(Form)
        QtCore.QMetaObject.connectSlotsByName(Form)

    def retranslateUi(self, Form):
        Form.setWindowTitle(_("Form"))
        self.label.setText(_("Match HTML &tags with tag name:"))
        self.tag.setItemText(0, _("*"))
        self.tag.setItemText(1, _("a"))
        self.tag.setItemText(2, _("br"))
        self.tag.setItemText(3, _("div"))
        self.tag.setItemText(4, _("h1"))
        self.tag.setItemText(5, _("h2"))
        self.tag.setItemText(6, _("h3"))
        self.tag.setItemText(7, _("h4"))
        self.tag.setItemText(8, _("h5"))
        self.tag.setItemText(9, _("h6"))
        self.tag.setItemText(10, _("hr"))
        self.tag.setItemText(11, _("span"))
        self.label_2.setText(_("Having the &attribute:"))
        self.label_3.setText(_("With &value:"))
        self.label_4.setText(_("(A regular expression)"))
        self.label_5.setText(_("<p>For example, to match all h2 tags that have class=\"chapter\", set tag to <i>h2</i>, attribute to <i>class</i> and value to <i>chapter</i>.</p><p>Leaving attribute blank will match any attribute and leaving value blank will match any value. Setting tag to * will match any tag.</p><p>To learn more advanced usage of XPath see the <a href=\"http://manual.calibre-ebook.com/xpath.html\">XPath Tutorial</a>."))
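A short hedged sketch of how a generated Ui_* class like the one above is typically driven (not part of the original file). The import path is inferred from the file's location, and retranslateUi() relies on calibre installing its translation function _ as a builtin, so this only runs inside a calibre environment.

# Illustrative sketch -- assumes PyQt4 plus a calibre environment that
# provides the builtin translation function _ used by retranslateUi().
import sys
from PyQt4 import QtGui
from calibre.gui2.convert.xpath_wizard_ui import Ui_Form

app = QtGui.QApplication(sys.argv)
host = QtGui.QWidget()
ui = Ui_Form()
ui.setupUi(host)   # builds the widgets and calls retranslateUi(host)
host.show()
app.exec_()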
gpl-3.0
abought/osf.io
api/base/views.py
2
20637
import weakref from django.http import JsonResponse from rest_framework.decorators import api_view from rest_framework.response import Response from rest_framework import generics # from rest_framework.serializers from rest_framework.mixins import ListModelMixin from api.users.serializers import UserSerializer from django.conf import settings as django_settings from .utils import absolute_reverse, is_truthy from .requests import EmbeddedRequest CACHE = weakref.WeakKeyDictionary() class JSONAPIBaseView(generics.GenericAPIView): def __init__(self, **kwargs): assert getattr(self, 'view_name', None), 'Must specify view_name on view.' assert getattr(self, 'view_category', None), 'Must specify view_category on view.' self.view_fqn = ':'.join([self.view_category, self.view_name]) super(JSONAPIBaseView, self).__init__(**kwargs) def _get_embed_partial(self, field_name, field): """Create a partial function to fetch the values of an embedded field. A basic example is to include a Node's children in a single response. :param str field_name: Name of field of the view's serializer_class to load results for :return function object -> dict: """ if getattr(field, 'field', None): field = field.field def partial(item): # resolve must be implemented on the field v, view_args, view_kwargs = field.resolve(item, field_name) if not v: return None if isinstance(self.request._request, EmbeddedRequest): request = self.request._request else: request = EmbeddedRequest(self.request) view_kwargs.update({ 'request': request, 'is_embedded': True }) # Setup a view ourselves to avoid all the junk DRF throws in # v is a function that hides everything v.cls is the actual view class view = v.cls() view.args = view_args view.kwargs = view_kwargs view.request = request view.request.parser_context['kwargs'] = view_kwargs view.format_kwarg = view.get_format_suffix(**view_kwargs) _cache_key = (v.cls, field_name, view.get_serializer_class(), item) if _cache_key in CACHE.setdefault(self.request._request, {}): # We already have the result for this embed, return it return CACHE[self.request._request][_cache_key] # Cache serializers. to_representation of a serializer should NOT augment it's fields so resetting the context # should be sufficient for reuse if not view.get_serializer_class() in CACHE.setdefault(self.request._request, {}): CACHE[self.request._request][view.get_serializer_class()] = view.get_serializer_class()(many=isinstance(view, ListModelMixin)) ser = CACHE[self.request._request][view.get_serializer_class()] try: ser._context = view.get_serializer_context() if not isinstance(view, ListModelMixin): ret = ser.to_representation(view.get_object()) else: queryset = view.filter_queryset(view.get_queryset()) page = view.paginate_queryset(queryset) ret = ser.to_representation(page or queryset) if page is not None: request.parser_context['view'] = view request.parser_context['kwargs'].pop('request') view.paginator.request = request ret = view.paginator.get_paginated_response(ret).data except Exception as e: ret = view.handle_exception(e).data # Allow request to be gc'd ser._context = None # Cache our final result CACHE[self.request._request][_cache_key] = ret return ret return partial def get_serializer_context(self): """Inject request into the serializer context. Additionally, inject partial functions (request, object -> embed items) if the query string contains embeds. Allows multiple levels of nesting. 
""" context = super(JSONAPIBaseView, self).get_serializer_context() if self.kwargs.get('is_embedded'): embeds = [] else: embeds = self.request.query_params.getlist('embed') fields_check = self.serializer_class._declared_fields.copy() for field in fields_check: if getattr(fields_check[field], 'field', None): fields_check[field] = fields_check[field].field for field in fields_check: if getattr(fields_check[field], 'always_embed', False) and field not in embeds: embeds.append(unicode(field)) if getattr(fields_check[field], 'never_embed', False) and field in embeds: embeds.remove(field) embeds_partials = {} for embed in embeds: embed_field = fields_check.get(embed) embeds_partials[embed] = self._get_embed_partial(embed, embed_field) context.update({ 'enable_esi': ( is_truthy(self.request.query_params.get('esi', django_settings.ENABLE_ESI)) and self.request.accepted_renderer.media_type in django_settings.ESI_MEDIA_TYPES ), 'embed': embeds_partials, 'envelope': self.request.query_params.get('envelope', 'data'), }) return context @api_view(('GET',)) def root(request, format=None): """Welcome to the V2 Open Science Framework API. With this API you can access users, projects, components, logs, and files from the [Open Science Framework](https://osf.io/). The Open Science Framework (OSF) is a free, open-source service maintained by the [Center for Open Science](http://cos.io/). The OSF serves as a repository and archive for study designs, materials, data, manuscripts, or anything else associated with your research during the research process. Every project and file on the OSF has a permanent unique identifier, and every registration (a permanent, time-stamped version of your projects and files) can be assigned a DOI/ARK. You can use the OSF to measure your impact by monitoring the traffic to projects and files you make public. With the OSF you have full control of what parts of your research are public and what remains private. Beta notice: This API is currently a beta service. You are encouraged to use the API and will receive support when doing so, however, while the API remains in beta status, it may change without notice as a result of product updates. The temporary beta status of the API will remain in place while it matures. In a future release, the beta status will be removed, at which point we will provide details on how long we will support the API V2 and under what circumstances it might change. #General API Usage The OSF API generally conforms to the [JSON-API v1.0 spec](http://jsonapi.org/format/1.0/). Where exceptions exist, they will be noted. Each endpoint will have its own documentation, but there are some general principles. ##Requests ###Canonical URLs All canonical URLs have trailing slashes. A request to an endpoint without a trailing slash will result in a 301 redirect to the canonical URL. There are some exceptions when working with the Files API, so if a URL in a response does not have a slash, do not append one. ###Plurals Endpoints are always pluralized. `/users/`, not `/user/`, `/nodes/`, not `/node/`. ###Common Actions Every endpoint in the OSF API responds to `GET`, `HEAD`, and `OPTION` requests. You must have adequate permissions to interact with the endpoint. Unauthorized use will result in 401 Unauthorized or 403 Forbidden responses. Use `HEAD` to probe an endpoint and make sure your headers are well-formed. `GET` will return a representation of the entity or entity collection referenced by the endpoint. 
An `OPTIONS` request will return a JSON object that describes the endpoint, including the name, a description, the acceptable request formats, the allowed response formats, and any actions available via the endpoint. ###Filtering Entity collections can be filtered by adding a query parameter in the form: filter[<fieldname>]=<matching information> String queries are filtered using substring matching. For example, if you were trying to find [Lise Meitner](http://en.wikipedia.org/wiki/Lise_Meitner): /users/?filter[full_name]=meitn You can filter on multiple fields, or the same field in different ways, by &-ing the query parameters together. /users/?filter[full_name]=lise&filter[family_name]=mei Boolean fields should be queried with `true` or `false`. /nodes/?filter[registered]=true You can request multiple resources by filtering on id and placing comma-separated values in your query parameter. /nodes/?filter[id]=aegu6,me23a You can filter with case-sensitivity or case-insensitivity by using `contains` and `icontains`, respectively. /nodes/?filter[tags][icontains]=help ###Embedding All related resources that appear in the `relationships` attribute are embeddable, meaning that by adding a query parameter like: /nodes/?embed=contributors it is possible to fetch a Node and its contributors in a single request. The embedded results will have the following structure: {relationship_name}: {full_embedded_response} Where `full_embedded_response` means the full API response resulting from a GET request to the `href` link of the corresponding related resource. This means if there are no errors in processing the embedded request the response will have the format: data: {response} And if there are errors processing the embedded request the response will have the format: errors: {errors} Multiple embeds can be achieved with multiple query parameters separated by "&". /nodes/?embed=contributors&embed=comments Some endpoints are automatically embedded. ###Pagination All entity collection endpoints respond to the `page` query parameter behavior as described in the [JSON-API pagination spec](http://jsonapi.org/format/1.0/#crud). However, pagination links are provided in the response, and you are encouraged to use that rather than adding query parameters by hand. ###Formatting POST/PUT/PATCH request bodies The OSF API follows the JSON-API spec for [create and update requests](http://jsonapi.org/format/1.0/#crud). This means all request bodies must be wrapped with some metadata. Each request body must be an object with a `data` key containing at least a `type` member. The value of the `type` member must agree with the `type` of the entities represented by the endpoint. If not, a 409 Conflict will be returned. The request should also contain an `attributes` member with an object containing the key-value pairs to be created/updated. PUT/PATCH requests must also have an `id` key that matches the id part of the endpoint. If the `id` key does not match the id path part, a 409 Conflict error will be returned. ####Example 1: Creating a Node via POST POST /v2/nodes/ { "data": { "type": "nodes", "attributes": { "title" : "A Phylogenetic Tree of Famous Internet Cats", "category" : "project", "description" : "How closely related are Grumpy Cat and C.H. Cheezburger? Is memefulness inheritable?" 
} } } ####Example 2: Updating a User via PUT PUT /v2/users/me/ { "data": { "id": "3rqxc", "type": "users", "attributes": { "full_name" : "Henrietta Swan Leavitt", "given_name" : "Henrietta", "middle_names" : "Swan", "family_name" : "Leavitt" } } } **NB:** If you PUT/PATCH to the `/users/me/` endpoint, you must still provide your full user id in the `id` field of the request. We do not support using the `me` alias in request bodies at this time. ###PUT vs. PATCH For most endpoints that support updates via PUT requests, we also allow PATCH updates. The only difference is that PUT requests require all mandatory attributes to be set, even if their value is unchanged. PATCH requests may omit mandatory attributes, whose value will be unchanged. ###Attribute Validation Endpoints that allow creation or modification of entities generally limit updates to certain attributes of the entity. If you attempt to set an attribute that does not permit updates (such as a `date_created` timestamp), the API will silently ignore that attribute. This will not affect the response from the API: if the request would have succeeded without the updated attribute, it will still report as successful. Likewise, if the request would have failed without the attribute update, the API will still report a failure. Typoed or non-existent attributes will behave the same as non-updatable attributes and be silently ignored. If a request is not working the way you expect, make sure to double check your spelling. ##Responses ###Entities An entity is a single resource that has been retrieved from the API, usually from an endpoint with the entity's id as the final path part. A successful response from an entity request will be a JSON object with a top level `data` key pointing to a sub-object with the following members: + `id` The identifier for the entity. This MUST be included with [PUT and PATCH requests](#formatting-postputpatch-request-bodies). + `type` The type identifier of this entity. This MUST be included with [all create/update requests](#formatting-postputpatch-request-bodies). + `attributes` The properties of the entity. Names, descriptions, etc. + `relationships` Relationships are urls to other entities or entity collections that have a relationship to the entity. For example, the node entity provides a `contributors` relationship that points to the endpoint to retrieve all contributors to that node. It is recommended to use these links rather than to id-filter general entity collection endpoints. They'll be faster, easier, and less error-prone. Generally a relationship will have the following structure: {relationship_name}: { "links": { "related": { "href": {url_to_related_entity_or_entity_collection}, "meta": {} } } } If there are no related entities, `href` will be null. + `embeds` Please see `Embedding` documentation under `Requests`. + `links` Links are urls to alternative representations of the entity or actions that may be performed on the entity. Most entities will provide a `self` link that is the canonical endpoint for the entity where update and delete requests should be sent. In-depth documentation of actions is available by navigating to the `self` link in the Browsable API. Most entities will also provide an `html` link that directs to the entity's page on the [OSF](http://osf.io/). ###Entity Collections Entity collection endpoints return a list of entities and an additional data structure with pagination links, such as "next", "prev", "first", and "last". 
The OSF API limits all entity collection responses to a maximum of 10 entities. The response object has two keys: + `data` `data` is an array of entities that match the query. Each entity in the array is the same representation that is returned from that entity's `self` link, meaning that refetching the entity is unnecessary. + `links` `links` contains pagination information, including links to the previous, next, first, and last pages of results. The meta key contains the total number of entities available, as well as the current number of results displayed per page. If there are only enough results to fill one page, the `first`, `last`, `prev`, and `next` values will be null. ###Errors When a request fails for whatever reason, the OSF API will return an appropriate HTTP error code and include a descriptive error in the body of the response. The response body will be an object with a key, `errors`, pointing to an array of error objects. Generally, these error objects will consist of a `detail` key with a detailed error message and a `source` object that may contain a field `pointer` that is a [JSON Pointer](https://tools.ietf.org/html/rfc6901) to the error-causing attribute. The `error` objects may include additional information in accordance with the [JSON-API error spec](http://jsonapi.org/format/1.0/#error-objects). ####Example: Error response from an incorrect create node request { "errors": [ { "source": { "pointer": "/data/attributes/category" }, "detail": "This field is required." }, { "source": { "pointer": "/data/type" }, "detail": "This field may not be null." }, { "source": { "pointer": "/data/attributes/title" }, "detail": "This field is required." } ] } ##OSF Enum Fields Some entities in the OSF API have fields that only take a restricted set of values. Those fields are listed here for reference. Fuller descriptions are available on the relevant entity pages. 
###OSF Node Categories value description ========================================== project Project hypothesis Hypothesis methods and measures Methods and Measures procedure Procedure instrumentation Instrumentation data Data analysis Analysis communication Communication other Other ###OSF Node Permission keys value description ========================================== read Read-only access write Write access (make changes, cannot delete) admin Admin access (full write, create, delete, contributor add) ###Storage Providers Valid storage providers are: value description ========================================== box Box.com cloudfiles Rackspace Cloud Files dataverse Dataverse dropbox Dropbox figshare figshare github GitHub googledrive Google Drive osfstorage OSF Storage s3 Amazon S3 """ if request.user and not request.user.is_anonymous(): user = request.user current_user = UserSerializer(user, context={'request': request}).data else: current_user = None return_val = { 'meta': { 'message': 'Welcome to the OSF API.', 'version': request.version, 'current_user': current_user, }, 'links': { 'nodes': absolute_reverse('nodes:node-list'), 'users': absolute_reverse('users:user-list'), 'collections': absolute_reverse('collections:collection-list'), 'registrations': absolute_reverse('registrations:registration-list'), 'institutions': absolute_reverse('institutions:institution-list'), 'licenses': absolute_reverse('licenses:license-list'), 'metaschemas': absolute_reverse('metaschemas:metaschema-list'), } } return Response(return_val) def error_404(request, format=None, *args, **kwargs): return JsonResponse( {'errors': [{'detail': 'Not found.'}]}, status=404, content_type='application/vnd.api+json; application/json' )
apache-2.0
UOMx/edx-platform
lms/djangoapps/courseware/migrations/0001_initial.py
46
7819
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import migrations, models
import model_utils.fields
import xmodule_django.models
import django.utils.timezone
from django.conf import settings


class Migration(migrations.Migration):

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='OfflineComputedGrade',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('course_id', xmodule_django.models.CourseKeyField(max_length=255, db_index=True)),
                ('created', models.DateTimeField(db_index=True, auto_now_add=True, null=True)),
                ('updated', models.DateTimeField(auto_now=True, db_index=True)),
                ('gradeset', models.TextField(null=True, blank=True)),
                ('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='OfflineComputedGradeLog',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('course_id', xmodule_django.models.CourseKeyField(max_length=255, db_index=True)),
                ('created', models.DateTimeField(db_index=True, auto_now_add=True, null=True)),
                ('seconds', models.IntegerField(default=0)),
                ('nstudents', models.IntegerField(default=0)),
            ],
            options={
                'ordering': ['-created'],
                'get_latest_by': 'created',
            },
        ),
        migrations.CreateModel(
            name='StudentFieldOverride',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, verbose_name='created', editable=False)),
                ('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, verbose_name='modified', editable=False)),
                ('course_id', xmodule_django.models.CourseKeyField(max_length=255, db_index=True)),
                ('location', xmodule_django.models.LocationKeyField(max_length=255, db_index=True)),
                ('field', models.CharField(max_length=255)),
                ('value', models.TextField(default=b'null')),
                ('student', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='StudentModule',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('module_type', models.CharField(default=b'problem', max_length=32, db_index=True, choices=[(b'problem', b'problem'), (b'video', b'video'), (b'html', b'html'), (b'course', b'course'), (b'chapter', b'Section'), (b'sequential', b'Subsection'), (b'library_content', b'Library Content')])),
                ('module_state_key', xmodule_django.models.LocationKeyField(max_length=255, db_column=b'module_id', db_index=True)),
                ('course_id', xmodule_django.models.CourseKeyField(max_length=255, db_index=True)),
                ('state', models.TextField(null=True, blank=True)),
                ('grade', models.FloatField(db_index=True, null=True, blank=True)),
                ('max_grade', models.FloatField(null=True, blank=True)),
                ('done', models.CharField(default=b'na', max_length=8, db_index=True, choices=[(b'na', b'NOT_APPLICABLE'), (b'f', b'FINISHED'), (b'i', b'INCOMPLETE')])),
                ('created', models.DateTimeField(auto_now_add=True, db_index=True)),
                ('modified', models.DateTimeField(auto_now=True, db_index=True)),
                ('student', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='StudentModuleHistory',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('version', models.CharField(db_index=True, max_length=255, null=True, blank=True)),
                ('created', models.DateTimeField(db_index=True)),
                ('state', models.TextField(null=True, blank=True)),
                ('grade', models.FloatField(null=True, blank=True)),
                ('max_grade', models.FloatField(null=True, blank=True)),
                ('student_module', models.ForeignKey(to='courseware.StudentModule')),
            ],
            options={
                'get_latest_by': 'created',
            },
        ),
        migrations.CreateModel(
            name='XModuleStudentInfoField',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('field_name', models.CharField(max_length=64, db_index=True)),
                ('value', models.TextField(default=b'null')),
                ('created', models.DateTimeField(auto_now_add=True, db_index=True)),
                ('modified', models.DateTimeField(auto_now=True, db_index=True)),
                ('student', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='XModuleStudentPrefsField',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('field_name', models.CharField(max_length=64, db_index=True)),
                ('value', models.TextField(default=b'null')),
                ('created', models.DateTimeField(auto_now_add=True, db_index=True)),
                ('modified', models.DateTimeField(auto_now=True, db_index=True)),
                ('module_type', xmodule_django.models.BlockTypeKeyField(max_length=64, db_index=True)),
                ('student', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='XModuleUserStateSummaryField',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('field_name', models.CharField(max_length=64, db_index=True)),
                ('value', models.TextField(default=b'null')),
                ('created', models.DateTimeField(auto_now_add=True, db_index=True)),
                ('modified', models.DateTimeField(auto_now=True, db_index=True)),
                ('usage_id', xmodule_django.models.LocationKeyField(max_length=255, db_index=True)),
            ],
        ),
        migrations.AlterUniqueTogether(
            name='xmoduleuserstatesummaryfield',
            unique_together=set([('usage_id', 'field_name')]),
        ),
        migrations.AlterUniqueTogether(
            name='xmodulestudentprefsfield',
            unique_together=set([('student', 'module_type', 'field_name')]),
        ),
        migrations.AlterUniqueTogether(
            name='xmodulestudentinfofield',
            unique_together=set([('student', 'field_name')]),
        ),
        migrations.AlterUniqueTogether(
            name='studentmodule',
            unique_together=set([('student', 'module_state_key', 'course_id')]),
        ),
        migrations.AlterUniqueTogether(
            name='studentfieldoverride',
            unique_together=set([('course_id', 'field', 'location', 'student')]),
        ),
        migrations.AlterUniqueTogether(
            name='offlinecomputedgrade',
            unique_together=set([('user', 'course_id')]),
        ),
    ]
agpl-3.0
domob1812/i0coin
qa/rpc-tests/getblocktemplate_longpoll.py
13
3618
#!/usr/bin/env python2
# Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.

from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *


def check_array_result(object_array, to_match, expected):
    """
    Pass in array of JSON objects, a dictionary with key/value pairs
    to match against, and another dictionary with expected key/value
    pairs.
    """
    num_matched = 0
    for item in object_array:
        all_match = True
        for key, value in to_match.items():
            if item[key] != value:
                all_match = False
        if not all_match:
            continue
        for key, value in expected.items():
            if item[key] != value:
                raise AssertionError("%s : expected %s=%s" % (str(item), str(key), str(value)))
        num_matched = num_matched + 1
    if num_matched == 0:
        raise AssertionError("No objects matched %s" % (str(to_match)))

import threading

class LongpollThread(threading.Thread):
    def __init__(self, node):
        threading.Thread.__init__(self)
        # query current longpollid
        templat = node.getblocktemplate()
        self.longpollid = templat['longpollid']
        # create a new connection to the node, we can't use the same
        # connection from two threads
        self.node = get_rpc_proxy(node.url, 1, timeout=600)

    def run(self):
        self.node.getblocktemplate({'longpollid': self.longpollid})

class GetBlockTemplateLPTest(BitcoinTestFramework):
    '''
    Test longpolling with getblocktemplate.
    '''
    def run_test(self):
        print "Warning: this test will take about 70 seconds in the best case. Be patient."
        self.nodes[0].generate(10)
        templat = self.nodes[0].getblocktemplate()
        longpollid = templat['longpollid']
        # longpollid should not change between successive invocations if nothing else happens
        templat2 = self.nodes[0].getblocktemplate()
        assert(templat2['longpollid'] == longpollid)

        # Test 1: test that the longpoll keeps waiting if we do nothing
        thr = LongpollThread(self.nodes[0])
        thr.start()
        # check that thread still lives
        thr.join(5)  # wait 5 seconds or until thread exits
        assert(thr.is_alive())

        # Test 2: test that longpoll will terminate if another node generates a block
        self.nodes[1].generate(1)  # generate a block on another node
        # check that thread will exit now that a new block has arrived
        thr.join(5)  # wait 5 seconds or until thread exits
        assert(not thr.is_alive())

        # Test 3: test that longpoll will terminate if we generate a block ourselves
        thr = LongpollThread(self.nodes[0])
        thr.start()
        self.nodes[0].generate(1)  # generate a block on our own node
        thr.join(5)  # wait 5 seconds or until thread exits
        assert(not thr.is_alive())

        # Test 4: test that introducing a new transaction into the mempool will terminate the longpoll
        thr = LongpollThread(self.nodes[0])
        thr.start()
        # generate a random transaction and submit it
        (txid, txhex, fee) = random_transaction(self.nodes, Decimal("1.1"), Decimal("0.0"), Decimal("0.001"), 20)
        # after one minute, the mempool is probed for new transactions every 10 seconds,
        # so within 80 seconds the longpoll should have returned
        thr.join(60 + 20)
        assert(not thr.is_alive())

if __name__ == '__main__':
    GetBlockTemplateLPTest().main()
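Written out linearly, the longpoll handshake the test above drives looks roughly like this (illustrative sketch, not part of the test file; `node` stands for any RPC proxy such as the ones the test framework creates).

# Illustrative sketch -- `node` is an assumed RPC proxy object.
template = node.getblocktemplate()
longpollid = template['longpollid']

# This call blocks until the node considers the template stale: a new block
# arrives, or (after about a minute) new transactions enter the mempool.
new_template = node.getblocktemplate({'longpollid': longpollid})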
mit
MickaelBergem/django-allauth
allauth/socialaccount/providers/coinbase/views.py
60
1110
import requests

from allauth.socialaccount.providers.oauth2.views import (OAuth2Adapter,
                                                          OAuth2LoginView,
                                                          OAuth2CallbackView)

from .provider import CoinbaseProvider


class CoinbaseOAuth2Adapter(OAuth2Adapter):
    provider_id = CoinbaseProvider.id

    @property
    def authorize_url(self):
        return 'https://coinbase.com/oauth/authorize'

    @property
    def access_token_url(self):
        return 'https://coinbase.com/oauth/token'

    @property
    def profile_url(self):
        return 'https://coinbase.com/api/v1/users'

    def complete_login(self, request, app, token, **kwargs):
        response = requests.get(self.profile_url,
                                params={'access_token': token})
        extra_data = response.json()['users'][0]['user']
        return self.get_provider().sociallogin_from_response(request,
                                                             extra_data)


oauth2_login = OAuth2LoginView.adapter_view(CoinbaseOAuth2Adapter)
oauth2_callback = OAuth2CallbackView.adapter_view(CoinbaseOAuth2Adapter)
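For context, a hedged sketch of the payload shape complete_login() above assumes from the Coinbase v1 users endpoint; the values are invented, only the nesting mirrors what the code indexes into.

# Illustrative sketch -- not part of the original module.
fake_payload = {
    'users': [
        {'user': {'id': '9da7a204', 'name': 'Jane Doe', 'email': 'jane@example.com'}},
    ],
}
# complete_login() takes response.json()['users'][0]['user'] as extra_data.
extra_data = fake_payload['users'][0]['user']
assert extra_data['email'] == 'jane@example.com'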
mit