repo_name: string (5 to 100 chars)
path: string (4 to 375 chars)
copies: string (991 classes)
size: string (4 to 7 chars)
content: string (666 to 1M chars)
license: string (15 classes)
3L3N4/metagoofil
pdfminer/pdftypes.py
26
7735
#!/usr/bin/env python2
import sys
import zlib
from lzw import lzwdecode
from ascii85 import ascii85decode, asciihexdecode
from runlength import rldecode
from psparser import PSException, PSObject
from psparser import LIT, KWD, STRICT

LITERAL_CRYPT = LIT('Crypt')

# Abbreviation of Filter names in PDF 4.8.6. "Inline Images"
LITERALS_FLATE_DECODE = (LIT('FlateDecode'), LIT('Fl'))
LITERALS_LZW_DECODE = (LIT('LZWDecode'), LIT('LZW'))
LITERALS_ASCII85_DECODE = (LIT('ASCII85Decode'), LIT('A85'))
LITERALS_ASCIIHEX_DECODE = (LIT('ASCIIHexDecode'), LIT('AHx'))
LITERALS_RUNLENGTH_DECODE = (LIT('RunLengthDecode'), LIT('RL'))
LITERALS_CCITTFAX_DECODE = (LIT('CCITTFaxDecode'), LIT('CCF'))
LITERALS_DCT_DECODE = (LIT('DCTDecode'), LIT('DCT'))


##  PDF Objects
##
class PDFObject(PSObject):
    pass

class PDFException(PSException):
    pass

class PDFTypeError(PDFException):
    pass

class PDFValueError(PDFException):
    pass

class PDFNotImplementedError(PSException):
    pass


##  PDFObjRef
##
class PDFObjRef(PDFObject):

    def __init__(self, doc, objid, _):
        if objid == 0:
            if STRICT:
                raise PDFValueError('PDF object id cannot be 0.')
        self.doc = doc
        self.objid = objid
        #self.genno = genno  # Never used.
        return

    def __repr__(self):
        return '<PDFObjRef:%d>' % (self.objid)

    def resolve(self):
        return self.doc.getobj(self.objid)


# resolve
def resolve1(x):
    """Resolves an object.

    If this is an array or dictionary, it may still contain
    some indirect objects inside.
    """
    while isinstance(x, PDFObjRef):
        x = x.resolve()
    return x

def resolve_all(x):
    """Recursively resolves the given object and all the internals.

    Make sure there is no indirect reference within the nested object.
    This procedure might be slow.
    """
    while isinstance(x, PDFObjRef):
        x = x.resolve()
    if isinstance(x, list):
        x = [ resolve_all(v) for v in x ]
    elif isinstance(x, dict):
        for (k,v) in x.iteritems():
            x[k] = resolve_all(v)
    return x

def decipher_all(decipher, objid, genno, x):
    """Recursively deciphers the given object.
    """
    if isinstance(x, str):
        return decipher(objid, genno, x)
    if isinstance(x, list):
        x = [ decipher_all(decipher, objid, genno, v) for v in x ]
    elif isinstance(x, dict):
        for (k,v) in x.iteritems():
            x[k] = decipher_all(decipher, objid, genno, v)
    return x


# Type checking
def int_value(x):
    x = resolve1(x)
    if not isinstance(x, int):
        if STRICT:
            raise PDFTypeError('Integer required: %r' % x)
        return 0
    return x

def float_value(x):
    x = resolve1(x)
    if not isinstance(x, float):
        if STRICT:
            raise PDFTypeError('Float required: %r' % x)
        return 0.0
    return x

def num_value(x):
    x = resolve1(x)
    if not (isinstance(x, int) or isinstance(x, float)):
        if STRICT:
            raise PDFTypeError('Int or Float required: %r' % x)
        return 0
    return x

def str_value(x):
    x = resolve1(x)
    if not isinstance(x, str):
        if STRICT:
            raise PDFTypeError('String required: %r' % x)
        return ''
    return x

def list_value(x):
    x = resolve1(x)
    if not (isinstance(x, list) or isinstance(x, tuple)):
        if STRICT:
            raise PDFTypeError('List required: %r' % x)
        return []
    return x

def dict_value(x):
    x = resolve1(x)
    if not isinstance(x, dict):
        if STRICT:
            raise PDFTypeError('Dict required: %r' % x)
        return {}
    return x

def stream_value(x):
    x = resolve1(x)
    if not isinstance(x, PDFStream):
        if STRICT:
            raise PDFTypeError('PDFStream required: %r' % x)
        return PDFStream({}, '')
    return x


##  PDFStream type
##
class PDFStream(PDFObject):

    def __init__(self, attrs, rawdata, decipher=None):
        assert isinstance(attrs, dict)
        self.attrs = attrs
        self.rawdata = rawdata
        self.decipher = decipher
        self.data = None
        self.objid = None
        self.genno = None
        return

    def set_objid(self, objid, genno):
        self.objid = objid
        self.genno = genno
        return

    def __repr__(self):
        if self.data is None:
            assert self.rawdata is not None
            return '<PDFStream(%r): raw=%d, %r>' % (self.objid, len(self.rawdata), self.attrs)
        else:
            assert self.data is not None
            return '<PDFStream(%r): len=%d, %r>' % (self.objid, len(self.data), self.attrs)

    def __contains__(self, name):
        return name in self.attrs

    def __getitem__(self, name):
        return self.attrs[name]

    def get(self, name, default=None):
        return self.attrs.get(name, default)

    def get_any(self, names, default=None):
        for name in names:
            if name in self.attrs:
                return self.attrs[name]
        return default

    def get_filters(self):
        filters = self.get_any(('F', 'Filter'))
        if not filters:
            return []
        if isinstance(filters, list):
            return filters
        return [ filters ]

    def decode(self):
        assert self.data is None and self.rawdata != None
        data = self.rawdata
        if self.decipher:
            # Handle encryption
            data = self.decipher(self.objid, self.genno, data)
        filters = self.get_filters()
        if not filters:
            self.data = data
            self.rawdata = None
            return
        for f in filters:
            if f in LITERALS_FLATE_DECODE:
                # will get errors if the document is encrypted.
                try:
                    data = zlib.decompress(data)
                except zlib.error:
                    data = ''
            elif f in LITERALS_LZW_DECODE:
                data = lzwdecode(data)
            elif f in LITERALS_ASCII85_DECODE:
                data = ascii85decode(data)
            elif f in LITERALS_ASCIIHEX_DECODE:
                data = asciihexdecode(data)
            elif f in LITERALS_RUNLENGTH_DECODE:
                data = rldecode(data)
            elif f in LITERALS_CCITTFAX_DECODE:
                #data = ccittfaxdecode(data)
                raise PDFNotImplementedError('Unsupported filter: %r' % f)
            elif f == LITERAL_CRYPT:
                # not yet..
                raise PDFNotImplementedError('/Crypt filter is unsupported')
            else:
                raise PDFNotImplementedError('Unsupported filter: %r' % f)
            # apply predictors
            params = self.get_any(('DP', 'DecodeParms', 'FDecodeParms'), {})
            if 'Predictor' in params and 'Columns' in params:
                pred = int_value(params['Predictor'])
                columns = int_value(params['Columns'])
                if pred:
                    if pred != 12:
                        raise PDFNotImplementedError('Unsupported predictor: %r' % pred)
                    buf = ''
                    ent0 = '\x00' * columns
                    for i in xrange(0, len(data), columns+1):
                        pred = data[i]
                        ent1 = data[i+1:i+1+columns]
                        if pred == '\x02':
                            ent1 = ''.join( chr((ord(a)+ord(b)) & 255) for (a,b) in zip(ent0,ent1) )
                        buf += ent1
                        ent0 = ent1
                    data = buf
        self.data = data
        self.rawdata = None
        return

    def get_data(self):
        if self.data is None:
            self.decode()
        return self.data

    def get_rawdata(self):
        return self.rawdata
gpl-2.0
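A minimal usage sketch for the indirect-reference machinery above, assuming pdftypes is importable; FakeDoc is a hypothetical stand-in for a parsed document, exposing only the getobj() method that PDFObjRef.resolve() calls:

from pdftypes import PDFObjRef, resolve1, int_value

class FakeDoc(object):
    # Hypothetical document: maps object ids to already-parsed objects.
    def __init__(self):
        self.objects = {}
    def getobj(self, objid):
        return self.objects[objid]

doc = FakeDoc()
doc.objects[2] = 42
doc.objects[1] = {'Length': PDFObjRef(doc, 2, 0)}

attrs = resolve1(PDFObjRef(doc, 1, 0))  # follows the top-level reference
print int_value(attrs['Length'])        # resolves the nested ref: prints 42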
egonw/citeulike
plugins/python/cases.py
2
3673
#!/usr/bin/env python
import os, sys, re, urllib2, cookielib, string
from urllib import urlencode
from urllib2 import urlopen
from copy import copy
import BeautifulSoup
import htmlentitydefs
import socket
socket.setdefaulttimeout(15)

class ParseException(Exception):
    pass

##
# Removes HTML or XML character references and entities from a text string.
#
# @param text The HTML (or XML) source text.
# @return The plain text, as a Unicode string, if necessary.
def unescape(text):
    def fixup(m):
        text = m.group(0)
        if text[:2] == "&#":
            # character reference
            try:
                if text[:3] == "&#x":
                    return unichr(int(text[3:-1], 16))
                else:
                    return unichr(int(text[2:-1]))
            except ValueError:
                pass
        else:
            # named entity
            try:
                text = unichr(htmlentitydefs.name2codepoint[text[1:-1]])
            except KeyError:
                pass
        return text # leave as is
    return re.sub("&#?\w+;", fixup, text).encode('utf-8','ignore')

def meta(soup, key):
    el = soup.find("meta", {'name':key})
    if el:
        return el['content'].encode('utf-8','ignore')
    return None

def item(soup, entry, key):
    el = meta(soup, key)
    if el:
        print "%s\t%s" % (entry, el)

def handle(url):
    m = re.match(r'http://(?:www\.)?(jmedicalcasereports|casesjournal)\.com/(?:jmedicalcasereports|casesjournal)/article/view/(\d+)', url)
    if not m:
        raise ParseException, "URL not supported %s" % url
    site = m.group(1)
    wkey = m.group(2)
    url = "http://%s.com/%s/article/viewArticle/%s" % (site, site, wkey)
    page = urlopen(url).read()
    soup = BeautifulSoup.BeautifulSoup(page)
    head = soup.find("head")
    doi = meta(head, 'citation_doi')
    if not doi:
        raise ParseException, "Cannot find DOI"
    citation_pdf_url = meta(head, 'citation_pdf_url')
    pdf_key = ""
    if citation_pdf_url:
        m = re.search(r'(\d+)/(\d+)', citation_pdf_url)
        if m:
            pdf_key = m.group(2)
    print "begin_tsv"
    print "linkout\tDOI\t\t%s\t\t" % (doi)
    if site == "casesjournal":
        print "linkout\tCASES\t%s\t\t%s\t" % (wkey, pdf_key)
    elif site == "jmedicalcasereports":
        print "linkout\tJMEDC\t%s\t\t%s\t" % (wkey, pdf_key)
    else:
        raise ParseException, "Unknown journal %s" % site
    print "type\tJOUR"
    title = meta(head, "citation_title")
    if title:
        print "title\t%s" % unescape(title)
    item(head, "journal", "citation_journal_title")
    item(head, "issue", "citation_issue")
    item(head, "issn", "citation_issn")
    date = meta(head, 'citation_date')
    if date:
        m = re.match(r'(\d+)/(\d+)/(\d+)', date)
        if m:
            day = m.group(1)
            month = m.group(2)
            year = m.group(3)
            if year:
                print "year\t%s" % year
            if month:
                print "month\t%s" % month
            if day:
                print "day\t%s" % day
    # authors
    authors = head.findAll("meta", {"name":"DC.Creator.PersonalName"})
    if authors:
        for a in authors:
            print "author\t%s" % a['content'].encode('utf-8','ignore')
    abstract = meta(head,"DC.Description")
    if abstract:
        abstract = abstract.strip()
        abstract = re.sub(r'<[^>]+>','',abstract)
        abstract = unescape(abstract)
        abstract = abstract.strip()
        print "abstract\t%s" % abstract
    print "doi\t%s" % doi
    print "end_tsv"
    print "status\tok"

# read url from std input
url = sys.stdin.readline()
# get rid of the newline at the end
url = url.strip()
try:
    handle(url)
except Exception, e:
    import traceback
    line = traceback.tb_lineno(sys.exc_info()[2])
    print "\t".join(["status", "error", "There was an internal error processing this request. Please report this to [email protected] quoting error code %d." % line])
    raise
bsd-3-clause
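The citation-metadata lookup in handle() can be tried offline on a static snippet; a sketch assuming the old BeautifulSoup 3 API that the script imports:

import BeautifulSoup

html = """<head>
<meta name="citation_doi" content="10.1186/1752-1947-1-1"/>
<meta name="citation_title" content="A sample case report"/>
</head>"""

soup = BeautifulSoup.BeautifulSoup(html)
head = soup.find("head")

def meta(soup, key):
    # Same lookup pattern as the script: match on the meta tag's name attr.
    el = soup.find("meta", {'name': key})
    return el['content'] if el else None

print meta(head, 'citation_doi')    # 10.1186/1752-1947-1-1
print meta(head, 'citation_title')  # A sample case report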
nguyenfilip/subscription-manager
test/test_rct_cert_command.py
3
2207
#
# Copyright (c) 2012 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
import unittest

from mock import patch

from rhsm.certificate import CertificateException

from rct.cert_commands import RCTCertCommand

from subscription_manager.cli import InvalidCLIOptionError


class RCTCertCommandTests(unittest.TestCase):

    def test_file_arg_required(self):
        command = RCTCertCommand()
        try:
            command.main([])
            self.fail("Expected InvalidCLIOptionError since no file arg.")
        except InvalidCLIOptionError, e:
            self.assertEqual("You must specify a certificate file.",
                             str(e))

    def test_invalid_file_arg(self):
        command = RCTCertCommand()
        try:
            command.main(["this_file_does_not_exist.crt"])
            self.fail("Expected InvalidCLIOptionError since the file does not exist.")
        except InvalidCLIOptionError, e:
            self.assertEqual("The specified certificate file does not exist.",
                             str(e))

    @patch("os.path.isfile")
    @patch("rhsm.certificate.create_from_file")
    def test_valid_x509_required(self, mock_create, mock_isfile):
        mock_create.side_effect = CertificateException("error!")
        mock_isfile.return_value = True
        command = RCTCertCommand()
        command._do_command = lambda: command._create_cert()
        try:
            command.main(['dummy-file.pem'])
            self.fail("Expected InvalidCLIOptionError since bad x509 file.")
        except InvalidCLIOptionError, e:
            self.assertEqual(
                "Unable to read certificate file 'dummy-file.pem': error!",
                str(e))
gpl-2.0
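These tests lean on stacked mock.patch decorators; a minimal standalone sketch of the same pattern (only the stdlib os.path target is assumed):

import os.path
import unittest
from mock import patch

class PatchPatternExample(unittest.TestCase):
    @patch("os.path.isfile")
    def test_isfile_is_mocked(self, mock_isfile):
        # The decorator swaps os.path.isfile for a MagicMock for the
        # duration of the test, then restores it. Decorators are applied
        # bottom-up, so with several @patch lines the innermost target
        # arrives as the first mock argument.
        mock_isfile.return_value = True
        self.assertTrue(os.path.isfile("no-such-file.pem"))
        mock_isfile.assert_called_once_with("no-such-file.pem")

if __name__ == "__main__":
    unittest.main()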
alexgorban/models
research/textsum/batch_reader.py
14
10400
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Batch reader for the seq2seq attention model, with bucketing support."""

from collections import namedtuple
from random import shuffle
from threading import Thread
import time

import numpy as np
import six
from six.moves import queue as Queue
from six.moves import xrange
import tensorflow as tf

import data

ModelInput = namedtuple('ModelInput',
                        'enc_input dec_input target enc_len dec_len '
                        'origin_article origin_abstract')

BUCKET_CACHE_BATCH = 100
QUEUE_NUM_BATCH = 100


class Batcher(object):
  """Batch reader with shuffling and bucketing support."""

  def __init__(self, data_path, vocab, hps,
               article_key, abstract_key, max_article_sentences,
               max_abstract_sentences, bucketing=True, truncate_input=False):
    """Batcher constructor.

    Args:
      data_path: tf.Example filepattern.
      vocab: Vocabulary.
      hps: Seq2SeqAttention model hyperparameters.
      article_key: article feature key in tf.Example.
      abstract_key: abstract feature key in tf.Example.
      max_article_sentences: Max number of sentences used from article.
      max_abstract_sentences: Max number of sentences used from abstract.
      bucketing: Whether bucket articles of similar length into the same batch.
      truncate_input: Whether to truncate input that is too long. Alternative
        is to discard such examples.
    """
    self._data_path = data_path
    self._vocab = vocab
    self._hps = hps
    self._article_key = article_key
    self._abstract_key = abstract_key
    self._max_article_sentences = max_article_sentences
    self._max_abstract_sentences = max_abstract_sentences
    self._bucketing = bucketing
    self._truncate_input = truncate_input
    self._input_queue = Queue.Queue(QUEUE_NUM_BATCH * self._hps.batch_size)
    self._bucket_input_queue = Queue.Queue(QUEUE_NUM_BATCH)
    self._input_threads = []
    for _ in xrange(16):
      self._input_threads.append(Thread(target=self._FillInputQueue))
      self._input_threads[-1].daemon = True
      self._input_threads[-1].start()
    self._bucketing_threads = []
    for _ in xrange(4):
      self._bucketing_threads.append(Thread(target=self._FillBucketInputQueue))
      self._bucketing_threads[-1].daemon = True
      self._bucketing_threads[-1].start()
    self._watch_thread = Thread(target=self._WatchThreads)
    self._watch_thread.daemon = True
    self._watch_thread.start()

  def NextBatch(self):
    """Returns a batch of inputs for seq2seq attention model.

    Returns:
      enc_batch: A batch of encoder inputs [batch_size, hps.enc_timesteps].
      dec_batch: A batch of decoder inputs [batch_size, hps.dec_timesteps].
      target_batch: A batch of targets [batch_size, hps.dec_timesteps].
      enc_input_len: encoder input lengths of the batch.
      dec_input_len: decoder input lengths of the batch.
      loss_weights: weights for loss function, 1 if not padded, 0 if padded.
      origin_articles: original article words.
      origin_abstracts: original abstract words.
    """
    enc_batch = np.zeros(
        (self._hps.batch_size, self._hps.enc_timesteps), dtype=np.int32)
    enc_input_lens = np.zeros(
        (self._hps.batch_size), dtype=np.int32)
    dec_batch = np.zeros(
        (self._hps.batch_size, self._hps.dec_timesteps), dtype=np.int32)
    dec_output_lens = np.zeros(
        (self._hps.batch_size), dtype=np.int32)
    target_batch = np.zeros(
        (self._hps.batch_size, self._hps.dec_timesteps), dtype=np.int32)
    loss_weights = np.zeros(
        (self._hps.batch_size, self._hps.dec_timesteps), dtype=np.float32)
    origin_articles = ['None'] * self._hps.batch_size
    origin_abstracts = ['None'] * self._hps.batch_size

    buckets = self._bucket_input_queue.get()
    for i in xrange(self._hps.batch_size):
      (enc_inputs, dec_inputs, targets, enc_input_len, dec_output_len,
       article, abstract) = buckets[i]

      origin_articles[i] = article
      origin_abstracts[i] = abstract
      enc_input_lens[i] = enc_input_len
      dec_output_lens[i] = dec_output_len
      enc_batch[i, :] = enc_inputs[:]
      dec_batch[i, :] = dec_inputs[:]
      target_batch[i, :] = targets[:]
      for j in xrange(dec_output_len):
        loss_weights[i][j] = 1
    return (enc_batch, dec_batch, target_batch, enc_input_lens,
            dec_output_lens, loss_weights, origin_articles, origin_abstracts)

  def _FillInputQueue(self):
    """Fill input queue with ModelInput."""
    start_id = self._vocab.WordToId(data.SENTENCE_START)
    end_id = self._vocab.WordToId(data.SENTENCE_END)
    pad_id = self._vocab.WordToId(data.PAD_TOKEN)
    input_gen = self._TextGenerator(data.ExampleGen(self._data_path))
    while True:
      (article, abstract) = six.next(input_gen)
      article_sentences = [sent.strip() for sent in
                           data.ToSentences(article, include_token=False)]
      abstract_sentences = [sent.strip() for sent in
                            data.ToSentences(abstract, include_token=False)]

      enc_inputs = []
      # Use the <s> as the <GO> symbol for decoder inputs.
      dec_inputs = [start_id]

      # Convert first N sentences to word IDs, stripping existing <s> and </s>.
      for i in xrange(min(self._max_article_sentences,
                          len(article_sentences))):
        enc_inputs += data.GetWordIds(article_sentences[i], self._vocab)
      for i in xrange(min(self._max_abstract_sentences,
                          len(abstract_sentences))):
        dec_inputs += data.GetWordIds(abstract_sentences[i], self._vocab)

      # Filter out too-short input
      if (len(enc_inputs) < self._hps.min_input_len or
          len(dec_inputs) < self._hps.min_input_len):
        tf.logging.warning('Drop an example - too short.\nenc:%d\ndec:%d',
                           len(enc_inputs), len(dec_inputs))
        continue

      # If we're not truncating input, throw out too-long input
      if not self._truncate_input:
        if (len(enc_inputs) > self._hps.enc_timesteps or
            len(dec_inputs) > self._hps.dec_timesteps):
          tf.logging.warning('Drop an example - too long.\nenc:%d\ndec:%d',
                             len(enc_inputs), len(dec_inputs))
          continue
      # If we are truncating input, do so if necessary
      else:
        if len(enc_inputs) > self._hps.enc_timesteps:
          enc_inputs = enc_inputs[:self._hps.enc_timesteps]
        if len(dec_inputs) > self._hps.dec_timesteps:
          dec_inputs = dec_inputs[:self._hps.dec_timesteps]

      # targets is dec_inputs without <s> at beginning, plus </s> at end
      targets = dec_inputs[1:]
      targets.append(end_id)

      # Now len(enc_inputs) should be <= enc_timesteps, and
      # len(targets) = len(dec_inputs) should be <= dec_timesteps
      enc_input_len = len(enc_inputs)
      dec_output_len = len(targets)

      # Pad if necessary
      while len(enc_inputs) < self._hps.enc_timesteps:
        enc_inputs.append(pad_id)
      while len(dec_inputs) < self._hps.dec_timesteps:
        dec_inputs.append(end_id)
      while len(targets) < self._hps.dec_timesteps:
        targets.append(end_id)

      element = ModelInput(enc_inputs, dec_inputs, targets, enc_input_len,
                           dec_output_len, ' '.join(article_sentences),
                           ' '.join(abstract_sentences))
      self._input_queue.put(element)

  def _FillBucketInputQueue(self):
    """Fill bucketed batches into the bucket_input_queue."""
    while True:
      inputs = []
      for _ in xrange(self._hps.batch_size * BUCKET_CACHE_BATCH):
        inputs.append(self._input_queue.get())
      if self._bucketing:
        inputs = sorted(inputs, key=lambda inp: inp.enc_len)

      batches = []
      for i in xrange(0, len(inputs), self._hps.batch_size):
        batches.append(inputs[i:i+self._hps.batch_size])
      shuffle(batches)
      for b in batches:
        self._bucket_input_queue.put(b)

  def _WatchThreads(self):
    """Watch the daemon input threads and restart if dead."""
    while True:
      time.sleep(60)
      input_threads = []
      for t in self._input_threads:
        if t.is_alive():
          input_threads.append(t)
        else:
          tf.logging.error('Found input thread dead.')
          new_t = Thread(target=self._FillInputQueue)
          input_threads.append(new_t)
          input_threads[-1].daemon = True
          input_threads[-1].start()
      self._input_threads = input_threads

      bucketing_threads = []
      for t in self._bucketing_threads:
        if t.is_alive():
          bucketing_threads.append(t)
        else:
          tf.logging.error('Found bucketing thread dead.')
          new_t = Thread(target=self._FillBucketInputQueue)
          bucketing_threads.append(new_t)
          bucketing_threads[-1].daemon = True
          bucketing_threads[-1].start()
      self._bucketing_threads = bucketing_threads

  def _TextGenerator(self, example_gen):
    """Generates article and abstract text from tf.Example."""
    while True:
      e = six.next(example_gen)
      try:
        article_text = self._GetExFeatureText(e, self._article_key)
        abstract_text = self._GetExFeatureText(e, self._abstract_key)
      except ValueError:
        tf.logging.error('Failed to get article or abstract from example')
        continue

      yield (article_text, abstract_text)

  def _GetExFeatureText(self, ex, key):
    """Extract text for a feature from tf.Example.

    Args:
      ex: tf.Example.
      key: key of the feature to be extracted.
    Returns:
      feature: a feature text extracted.
    """
    return ex.features.feature[key].bytes_list.value[0]
apache-2.0
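The bucketing in _FillBucketInputQueue boils down to: sort a large cache of examples by encoder length, slice it into batches, then shuffle whole batches so each batch holds similarly-sized inputs. A standalone sketch of that idea (not the Batcher API):

from random import shuffle

def make_buckets(examples, batch_size, key=len):
    # Sort so neighbouring examples have similar lengths, which
    # minimises padding inside each batch.
    ordered = sorted(examples, key=key)
    batches = [ordered[i:i + batch_size]
               for i in range(0, len(ordered), batch_size)]
    # Shuffle whole batches (not individual examples) so training
    # still sees batches in random order.
    shuffle(batches)
    return batches

seqs = [[1] * n for n in (9, 2, 7, 3, 8, 1, 5, 4)]
for batch in make_buckets(seqs, batch_size=2):
    print([len(s) for s in batch])  # e.g. [1, 2], [3, 4], [5, 7], [8, 9]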
jelugbo/hebs_master
common/lib/xmodule/xmodule/modulestore/django.py
17
6349
""" Module that provides a connection to the ModuleStore specified in the django settings. Passes settings.MODULESTORE as kwargs to MongoModuleStore """ from __future__ import absolute_import from importlib import import_module from django.conf import settings if not settings.configured: settings.configure() from django.core.cache import get_cache, InvalidCacheBackendError import django.utils import re import threading from xmodule.util.django import get_current_request_hostname import xmodule.modulestore # pylint: disable=unused-import from xmodule.modulestore.mixed import MixedModuleStore from xmodule.modulestore.draft_and_published import BranchSettingMixin from xmodule.contentstore.django import contentstore import xblock.reference.plugins # We may not always have the request_cache module available try: from request_cache.middleware import RequestCache HAS_REQUEST_CACHE = True except ImportError: HAS_REQUEST_CACHE = False ASSET_IGNORE_REGEX = getattr(settings, "ASSET_IGNORE_REGEX", r"(^\._.*$)|(^\.DS_Store$)|(^.*~$)") def load_function(path): """ Load a function by name. path is a string of the form "path.to.module.function" returns the imported python object `function` from `path.to.module` """ module_path, _, name = path.rpartition('.') return getattr(import_module(module_path), name) def create_modulestore_instance(engine, content_store, doc_store_config, options, i18n_service=None, fs_service=None): """ This will return a new instance of a modulestore given an engine and options """ class_ = load_function(engine) _options = {} _options.update(options) FUNCTION_KEYS = ['render_template'] for key in FUNCTION_KEYS: if key in _options and isinstance(_options[key], basestring): _options[key] = load_function(_options[key]) if HAS_REQUEST_CACHE: request_cache = RequestCache.get_request_cache() else: request_cache = None try: metadata_inheritance_cache = get_cache('mongo_metadata_inheritance') except InvalidCacheBackendError: metadata_inheritance_cache = get_cache('default') if issubclass(class_, MixedModuleStore): _options['create_modulestore_instance'] = create_modulestore_instance if issubclass(class_, BranchSettingMixin): _options['branch_setting_func'] = _get_modulestore_branch_setting return class_( contentstore=content_store, metadata_inheritance_cache_subsystem=metadata_inheritance_cache, request_cache=request_cache, xblock_mixins=getattr(settings, 'XBLOCK_MIXINS', ()), xblock_select=getattr(settings, 'XBLOCK_SELECT_FUNCTION', None), doc_store_config=doc_store_config, i18n_service=i18n_service or ModuleI18nService(), fs_service=fs_service or xblock.reference.plugins.FSService(), **_options ) # A singleton instance of the Mixed Modulestore _MIXED_MODULESTORE = None def modulestore(): """ Returns the Mixed modulestore """ global _MIXED_MODULESTORE # pylint: disable=global-statement if _MIXED_MODULESTORE is None: _MIXED_MODULESTORE = create_modulestore_instance( settings.MODULESTORE['default']['ENGINE'], contentstore(), settings.MODULESTORE['default'].get('DOC_STORE_CONFIG', {}), settings.MODULESTORE['default'].get('OPTIONS', {}) ) return _MIXED_MODULESTORE def clear_existing_modulestores(): """ Clear the existing modulestore instances, causing them to be re-created when accessed again. This is useful for flushing state between unit tests. """ global _MIXED_MODULESTORE # pylint: disable=global-statement _MIXED_MODULESTORE = None class ModuleI18nService(object): """ Implement the XBlock runtime "i18n" service. Mostly a pass-through to Django's translation module. 
django.utils.translation implements the gettext.Translations interface (it has ugettext, ungettext, etc), so we can use it directly as the runtime i18n service. """ def __getattr__(self, name): return getattr(django.utils.translation, name) def strftime(self, *args, **kwargs): """ A locale-aware implementation of strftime. """ # This is the wrong place to import this function. I'm putting it here # because the xmodule test suite can't import this module, because # Django is not available in that suite. This function isn't called in # that suite, so this hides the import so the test won't fail. # # As I said, this is wrong. But Cale says this code will soon be # refactored to a place that will be right, and the code can be made # right there. If you are reading this comment after April 1, 2014, # then Cale was a liar. from util.date_utils import strftime_localized return strftime_localized(*args, **kwargs) def _get_modulestore_branch_setting(): """ Returns the branch setting for the module store from the current Django request if configured, else returns the branch value from the configuration settings if set, else returns None The value of the branch setting is cached in a thread-local variable so it is not repeatedly recomputed """ def get_branch_setting(): """ Finds and returns the branch setting based on the Django request and the configuration settings """ branch = None hostname = get_current_request_hostname() if hostname: # get mapping information which is defined in configurations mappings = getattr(settings, 'HOSTNAME_MODULESTORE_DEFAULT_MAPPINGS', None) # compare hostname against the regex expressions set of mappings which will tell us which branch to use if mappings: for key in mappings.iterkeys(): if re.match(key, hostname): return mappings[key] if branch is None: branch = getattr(settings, 'MODULESTORE_BRANCH', None) return branch # leaving this in code structured in closure-friendly format b/c we might eventually cache this (again) # using request_cache return get_branch_setting()
agpl-3.0
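modulestore() above is the usual lazily-initialised module-level singleton, with a companion reset hook for tests; a minimal sketch of the pattern in isolation (ExpensiveStore is hypothetical):

_STORE = None

class ExpensiveStore(object):
    """Stand-in for a costly-to-construct service."""
    def __init__(self):
        print("constructing store once")

def store():
    """Create the shared instance on first use, then reuse it."""
    global _STORE
    if _STORE is None:
        _STORE = ExpensiveStore()
    return _STORE

def clear_store():
    """Reset, e.g. between unit tests."""
    global _STORE
    _STORE = None

assert store() is store()  # constructed exactly once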
xuweiliang/Codelibrary
openstack_dashboard/dashboards/admin/users/tables.py
1
7776
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from django.template import defaultfilters
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext_lazy

from horizon import forms
from horizon import messages  # used by ToggleEnabled.action() below
from horizon import tables

from openstack_dashboard import api
from openstack_dashboard import policy
from openstack_dashboard import record_action

ENABLE = 0
DISABLE = 1
KEYSTONE_V2_ENABLED = api.keystone.VERSIONS.active < 3


class CreateUserLink(tables.LinkAction):
    name = "create"
    verbose_name = _("Create User")
    url = "horizon:admin:users:create"
    classes = ("ajax-modal",)
    icon = "plus"
    policy_rules = (('admin', 'admin:create_grant'),
                    ("admin", "admin:create_user"),
                    ("admin", "admin:list_roles"),
                    ("admin", "admin:list_projects"),)

    def allowed(self, request, user):
        return api.keystone.keystone_can_edit_user()


class EditUserLink(policy.PolicyTargetMixin, tables.LinkAction):
    name = "edit"
    verbose_name = _("Edit")
    url = "horizon:admin:users:update"
    classes = ("ajax-modal",)
    icon = "pencil"
    policy_rules = (("admin", "admin:update_user"),
                    ("admin", "admin:list_projects"),)
    policy_target_attrs = (("user_id", "id"),)

    def allowed(self, request, user):
        return api.keystone.keystone_can_edit_user()


class ToggleEnabled(policy.PolicyTargetMixin, tables.BatchAction):
    name = "toggle"

    @staticmethod
    def action_present(count):
        return (
            ungettext_lazy(
                u"Enable User",
                u"Enable Users",
                count
            ),
            ungettext_lazy(
                u"Disable User",
                u"Disable Users",
                count
            ),
        )

    @staticmethod
    def action_past(count):
        return (
            ungettext_lazy(
                u"Enabled User",
                u"Enabled Users",
                count
            ),
            ungettext_lazy(
                u"Disabled User",
                u"Disabled Users",
                count
            ),
        )

    classes = ("btn-toggle",)
    policy_rules = (("admin", "admin:update_user"),)
    policy_target_attrs = (("user_id", "id"),)

    def allowed(self, request, user=None):
        if not api.keystone.keystone_can_edit_user():
            return False

        self.enabled = True
        if not user:
            return self.enabled
        self.enabled = user.enabled
        if self.enabled:
            self.current_present_action = DISABLE
        else:
            self.current_present_action = ENABLE
        return True

    def update(self, request, user=None):
        super(ToggleEnabled, self).update(request, user)
        if user and user.id == request.user.id:
            self.attrs["disabled"] = "disabled"

    def action(self, request, obj_id):
        user_data = api.keystone.user_get(request, obj_id)
        if obj_id == request.user.id:
            msg = _('You cannot disable the user you are '
                    'currently logged in as.')
            messages.info(request, msg)
            api.nova.systemlogs_create(request, user_data.name,
                                       record_action.TOGGLEUSER,
                                       result=False, detail=msg)
            return
        if self.enabled:
            api.keystone.user_update_enabled(request, obj_id, False)
            self.current_past_action = DISABLE
            flag = 'Disable '
        else:
            api.keystone.user_update_enabled(request, obj_id, True)
            self.current_past_action = ENABLE
            flag = 'Enable '
        objectname = flag + 'User'
        api.nova.systemlogs_create(request, user_data.name, objectname,
                                   result=True, detail='-')


class DeleteUsersAction(policy.PolicyTargetMixin, tables.DeleteAction):
    @staticmethod
    def action_present(count):
        return ungettext_lazy(
            u"Delete User",
            u"Delete Users",
            count
        )

    @staticmethod
    def action_past(count):
        return ungettext_lazy(
            u"Deleted User",
            u"Deleted Users",
            count
        )

    policy_rules = (("admin", "admin:delete_user"),)

    def allowed(self, request, datum):
        SystemName = ['glance', 'cinder', 'neutron', 'nova', 'admin',
                      request.user.username]
        self.result = True
        self.detail = '-'
        if datum is not None and datum.name in SystemName:
            self.result = False
            self.detail = _("Not allowed to delete this user")
            #if not api.keystone.keystone_can_edit_user() or \
            #        (datum and datum.id == request.user.id):
            #    return False
            return False
        return True

    def delete(self, request, obj_id):
        user_data = api.keystone.user_get(request, obj_id)
        api.keystone.user_delete(request, obj_id)


class UserFilterAction(tables.FilterAction):
    def filter(self, table, users, filter_string):
        """Naive case-insensitive search."""
        q = filter_string.lower()
        return [user for user in users if q in user.name.lower()]

    # if api.keystone.VERSIONS.active < 3:
    #     filter_type = "query"
    # else:
    #     filter_type = "server"
    #     filter_choices = (("name", _("User Name ="), True),
    #                       ("id", _("User ID ="), True),
    #                       ("enabled", _("Enabled ="), True, _('e.g. Yes/No')))


class UpdateRow(tables.Row):
    ajax = True

    def get_data(self, request, user_id):
        user_info = api.keystone.user_get(request, user_id, admin=True)
        return user_info


class UsersTable(tables.DataTable):
    STATUS_CHOICES = (
        ("true", True),
        ("false", False)
    )
    name = tables.Column('name', verbose_name=_('User Name'))
    email = tables.Column('email', verbose_name=_('Email'),
                          filters=(lambda v: defaultfilters
                                   .default_if_none(v, ""),
                                   defaultfilters.escape,
                                   defaultfilters.urlize)
                          )
    # Default tenant is not returned from Keystone currently.
    # default_tenant = tables.Column('default_tenant',
    #                                verbose_name=_('Default Project'))
    #id = tables.Column('id', verbose_name=_('User ID'))
    enabled = tables.Column('enabled', verbose_name=_('Enabled'),
                            status=True,
                            status_choices=STATUS_CHOICES,
                            filters=(defaultfilters.yesno,
                                     defaultfilters.capfirst),
                            empty_value="False")

    if api.keystone.VERSIONS.active >= 3:
        domain_name = tables.Column('domain_name',
                                    verbose_name=_('Domain Name'),
                                    attrs={'data-type': 'uuid'})

    class Meta(object):
        name = "users"
        verbose_name = _("Users")
        row_actions = (EditUserLink, ToggleEnabled, DeleteUsersAction)
        table_actions = (UserFilterAction, CreateUserLink, DeleteUsersAction)
        row_class = UpdateRow
apache-2.0
ccellis/WHACK2016
flask/lib/python2.7/site-packages/pip/commands/search.py
344
4736
import sys
import textwrap

import pip.download

from pip.basecommand import Command, SUCCESS
from pip.util import get_terminal_size
from pip.log import logger
from pip.backwardcompat import xmlrpclib, reduce, cmp
from pip.exceptions import CommandError
from pip.status_codes import NO_MATCHES_FOUND
from pip._vendor import pkg_resources
from distutils.version import StrictVersion, LooseVersion


class SearchCommand(Command):
    """Search for PyPI packages whose name or summary contains <query>."""
    name = 'search'
    usage = """
      %prog [options] <query>"""
    summary = 'Search PyPI for packages.'

    def __init__(self, *args, **kw):
        super(SearchCommand, self).__init__(*args, **kw)
        self.cmd_opts.add_option(
            '--index',
            dest='index',
            metavar='URL',
            default='https://pypi.python.org/pypi',
            help='Base URL of Python Package Index (default %default)')

        self.parser.insert_option_group(0, self.cmd_opts)

    def run(self, options, args):
        if not args:
            raise CommandError('Missing required argument (search query).')
        query = args
        index_url = options.index

        pypi_hits = self.search(query, index_url)
        hits = transform_hits(pypi_hits)

        terminal_width = None
        if sys.stdout.isatty():
            terminal_width = get_terminal_size()[0]

        print_results(hits, terminal_width=terminal_width)
        if pypi_hits:
            return SUCCESS
        return NO_MATCHES_FOUND

    def search(self, query, index_url):
        pypi = xmlrpclib.ServerProxy(index_url)
        hits = pypi.search({'name': query, 'summary': query}, 'or')
        return hits


def transform_hits(hits):
    """
    The list from pypi is really a list of versions. We want a list of
    packages with the list of versions stored inline. This converts the
    list from pypi into one we can use.
    """
    packages = {}
    for hit in hits:
        name = hit['name']
        summary = hit['summary']
        version = hit['version']
        score = hit['_pypi_ordering']
        if score is None:
            score = 0

        if name not in packages.keys():
            packages[name] = {'name': name, 'summary': summary, 'versions': [version], 'score': score}
        else:
            packages[name]['versions'].append(version)

            # if this is the highest version, replace summary and score
            if version == highest_version(packages[name]['versions']):
                packages[name]['summary'] = summary
                packages[name]['score'] = score

    # each record has a unique name now, so we will convert the dict into a list sorted by score
    package_list = sorted(packages.values(), key=lambda x: x['score'], reverse=True)
    return package_list


def print_results(hits, name_column_width=25, terminal_width=None):
    installed_packages = [p.project_name for p in pkg_resources.working_set]
    for hit in hits:
        name = hit['name']
        summary = hit['summary'] or ''
        if terminal_width is not None:
            # wrap and indent summary to fit terminal
            summary = textwrap.wrap(summary, terminal_width - name_column_width - 5)
            summary = ('\n' + ' ' * (name_column_width + 3)).join(summary)
        line = '%s - %s' % (name.ljust(name_column_width), summary)
        try:
            logger.notify(line)
            if name in installed_packages:
                dist = pkg_resources.get_distribution(name)
                logger.indent += 2
                try:
                    latest = highest_version(hit['versions'])
                    if dist.version == latest:
                        logger.notify('INSTALLED: %s (latest)' % dist.version)
                    else:
                        logger.notify('INSTALLED: %s' % dist.version)
                        logger.notify('LATEST: %s' % latest)
                finally:
                    logger.indent -= 2
        except UnicodeEncodeError:
            pass


def compare_versions(version1, version2):
    try:
        return cmp(StrictVersion(version1), StrictVersion(version2))
    # in case of abnormal version number, fall back to LooseVersion
    except ValueError:
        pass
    try:
        return cmp(LooseVersion(version1), LooseVersion(version2))
    except TypeError:
        # certain LooseVersion comparisons raise due to unorderable types,
        # fallback to string comparison
        return cmp([str(v) for v in LooseVersion(version1).version],
                   [str(v) for v in LooseVersion(version2).version])


def highest_version(versions):
    return reduce((lambda v1, v2: compare_versions(v1, v2) == 1 and v1 or v2), versions)
bsd-3-clause
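The version-ordering fallback above can be exercised on its own; a trimmed sketch using the same distutils classes (the final TypeError branch is omitted, and cmp is written out since it is a Python 2 builtin):

from distutils.version import StrictVersion, LooseVersion

def cmp(a, b):
    return (a > b) - (a < b)

def compare_versions(v1, v2):
    try:
        # Strict parsing compares numerically: '1.10' > '1.2'.
        return cmp(StrictVersion(v1), StrictVersion(v2))
    except ValueError:
        # e.g. '1.0.post1' is not a strict version; fall back.
        return cmp(LooseVersion(v1), LooseVersion(v2))

print(compare_versions('1.2', '1.10'))       # -1: numeric, not string, order
print(compare_versions('1.0.post1', '1.0'))  # 1: handled by LooseVersion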
frabcus/mpcv
bin/parl.2017-06-08/01-all-candidates.py
1
2347
#!/usr/bin/env python3

import sys
import os
import collections
import datetime

import flask_mail
import boto.s3.key

sys.path.append(os.getcwd())
import app
import identity
import lookups

app.app.config['SERVER_NAME'] = 'cv.democracyclub.org.uk'

# Get list of when last sent
last_sent_by_email = lookups.candidate_mail_last_sent(app.app.config)

with app.app.app_context():
    for constituency in lookups.all_constituencies(app.app.config):
        for candidate in constituency:
            if candidate['id'] in [5819]:
                print("unsubscribed", candidate)
                continue

            if candidate['has_cv']:
                continue
            if not candidate['email']:
                continue

            # Only mail to ones we haven't mailed recently
            if candidate['email'] in last_sent_by_email:
                back_to = datetime.datetime.now() - datetime.timedelta(days=14)
                last_sent = last_sent_by_email[candidate['email']]
                if last_sent > back_to:
                    print("skipping too recent", candidate['email'], last_sent, ">", back_to)
                    continue

            link = identity.generate_upload_url(app.app.secret_key, candidate['id'])

            body = '''Hi!

Great that you're standing for Parliament again!

At the last General Election, we found voters love to learn more about you
by seeing the career history on your CV.

To share your CV with voters, follow this link.

{link}

If you're having trouble, reply to this email with an attachment!

Many thanks,

Francis
Volunteer, Democracy Club CVs
http://cv.democracyclub.org.uk/
'''.format(link=link, linkedin_url=candidate['linkedin_url'], name=candidate['name'])

            print("sending to: " + candidate['email'])
            # For debugging:
            #print("\n" + body)
            #candidate['email'] = '[email protected]'

            msg = flask_mail.Message(
                body=body,
                subject="Your voters would like to see your CV!",
                sender=("Democracy Club CVs", "[email protected]"),
                recipients=[
                    (candidate['name'], candidate['email'])
                ]
            )
            app.mail.send(msg)

            lookups.candidate_mail_sent(app.app.config, candidate['email'])
agpl-3.0
pavels/pootle
pootle/core/utils/wordcount.py
4
4873
# -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.

import re


re._MAXCACHE = 1000

remove = re.compile(u"[\.]+", re.U)  # dots

delimiters = re.compile(u"[\W]+", re.U)  # anything except a-z, A-Z and _
delimiters_begin = re.compile(u"^[\W]+", re.U)  # anything except a-z, A-Z and _
delimiters_end = re.compile(u"[\W]+$", re.U)  # anything except a-z, A-Z and _

english_date = re.compile(
    u"(^|\W)(January|February|March|April|May|June|July|August|September|"
    u"October|November|December)\s+\d{1,2},\s+(?:\d{2})?\d{2}(\W|$)",
    re.U
)

escaped_xmltag_regex = re.compile(u'(&lt;\/?[\w]+.*?>)', re.U)
xmltag_regex = re.compile(u'(<\/?[\w]+.*?>)', re.U)
java_format_regex = re.compile(u'(\\\{\d+\\\}|\{\d+\})', re.U)
template_format_regex = re.compile(u'(\$\{[\w\.\:]+\})', re.U)
android_format_regex = re.compile(u'(%\d\$\w)', re.U)
sprintf_regex = re.compile(u'(%[\d]*(?:.\d+)*(?:h|l|I|I32|I64)*[cdiouxefgns])', re.U)
objective_c_regex = re.compile(u'(%@)', re.U)
dollar_sign_regex = re.compile(u'(\$[\w\d]+?\$)', re.U)
persent_sign_regex = re.compile(u'(\%[\w\d]+?\%)', re.U)
newline_regex = re.compile(u'(\{\\\n\})', re.U)
escaping_sqc_regex = re.compile(u'(\\\+[rnt])', re.U)
xml_entities_regex = re.compile(u'(&#\d+;|&\w+;)', re.U)
product_names_regex = re.compile(
    u"(Evernote International|Evernote Food|Evernote Hello|Evernote Clearly|"
    u"Evernote Business|Skitch|Evernote®?|Food|^Hello$|Clearly)",
    re.U
)
shortcuts_regex = re.compile(u'(Ctrl\+\w$|Shift\+\w$|Alt\+\w$)', re.U)
shortcuts_modifier_regex = re.compile(u'(Ctrl\+$|Shift\+$|Alt\+$)', re.U)
hanging_symbols_regex = \
    re.compile(u'(^[^\w\&]\s|\s[^\w\&]\s|\s[^\w\&]$|^[^\w\&]$)', re.U)


def find_placeholders(aref, regex, cls=''):
    # regex is compiled re object with pattern surrounded by "()"
    i = 0
    while i < len(aref):
        chunk = aref[i]
        if not chunk['translate']:
            i += 1
        else:
            subchunks = regex.split(chunk['string'])
            a = []
            translate = False
            for subchunk in subchunks:
                translate = not translate
                a.append({
                    'translate': translate,
                    'string': subchunk,
                    'class': cls
                })
            aref[i:i+1] = a
            i += len(a)


def wordcount(string):
    string = re.sub('\n', '{\\n}', string)
    chunks = [{
        'translate': 1,
        'string': u'%s' % string,
    }]

    # FIXME: provide line continuations to fit lines below 80 chars

    # Escaped XML tags (used in some strings)
    find_placeholders(chunks, escaped_xmltag_regex)
    # XML tags
    find_placeholders(chunks, xmltag_regex)
    # Java format and its escaped version
    find_placeholders(chunks, java_format_regex)
    # Template format
    find_placeholders(chunks, template_format_regex)
    # Android format
    find_placeholders(chunks, android_format_regex)
    # sprintf
    find_placeholders(chunks, sprintf_regex)
    # Objective C style placeholders
    find_placeholders(chunks, objective_c_regex)
    # Dollar sign placeholders
    find_placeholders(chunks, dollar_sign_regex)
    # Percent sign placeholders
    find_placeholders(chunks, persent_sign_regex)
    # '{\n}' newline marker
    find_placeholders(chunks, newline_regex)
    # Escaping sequences (\n, \r, \t)
    find_placeholders(chunks, escaping_sqc_regex)
    # XML entities
    find_placeholders(chunks, xml_entities_regex)
    # Product names
    find_placeholders(chunks, product_names_regex)
    # Shortcuts
    find_placeholders(chunks, shortcuts_regex)
    # Shortcut modifiers
    find_placeholders(chunks, shortcuts_modifier_regex)

    # Find patterns that are not counted as words in Trados
    # Hanging symbols (excluding a-z, _ and &)
    find_placeholders(chunks, hanging_symbols_regex, 'dont-count')

    return _count_words(chunks)


def _count_words(aref):
    # These rules are based on observed Trados 2007 word calculation behavior
    n = 0
    for chunk in aref:
        if chunk['translate']:
            s = chunk['string']
            # Replace the date with just the month name (i.e. count as a
            # single word)
            s = english_date.sub(u'\g<1>\g<2>\g<3>', s)

            s = remove.sub(u'', s)
            s = delimiters_begin.sub(u'', s)
            s = delimiters_end.sub(u'', s)

            a = delimiters.split(s)

            if len(a) > 1 and a[-1] == u'':
                a.pop()
            if len(a) == 1 and a[0] == u'':
                a.pop()

            n += len(a)

    return n
gpl-3.0
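A usage sketch for wordcount() above, assuming the module imports from the path in this row; the expected counts follow from the placeholder rules:

# -*- coding: utf-8 -*-
from pootle.core.utils.wordcount import wordcount

# XML tags and sprintf placeholders are split out before counting,
# so only "Open", "the" and "file" contribute.
print wordcount(u'Open the <b>%s</b> file')  # 3

# A trailing keyboard shortcut is treated as a placeholder, not a word.
print wordcount(u'Press Ctrl+C')  # 1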
wkschwartz/django
tests/template_tests/syntax_tests/test_url.py
34
11815
from django.template import RequestContext, TemplateSyntaxError
from django.test import RequestFactory, SimpleTestCase, override_settings
from django.urls import NoReverseMatch, resolve

from ..utils import setup


@override_settings(ROOT_URLCONF='template_tests.urls')
class UrlTagTests(SimpleTestCase):
    request_factory = RequestFactory()

    # Successes
    @setup({'url01': '{% url "client" client.id %}'})
    def test_url01(self):
        output = self.engine.render_to_string('url01', {'client': {'id': 1}})
        self.assertEqual(output, '/client/1/')

    @setup({'url02': '{% url "client_action" id=client.id action="update" %}'})
    def test_url02(self):
        output = self.engine.render_to_string('url02', {'client': {'id': 1}})
        self.assertEqual(output, '/client/1/update/')

    @setup({'url02a': '{% url "client_action" client.id "update" %}'})
    def test_url02a(self):
        output = self.engine.render_to_string('url02a', {'client': {'id': 1}})
        self.assertEqual(output, '/client/1/update/')

    @setup({'url02b': "{% url 'client_action' id=client.id action='update' %}"})
    def test_url02b(self):
        output = self.engine.render_to_string('url02b', {'client': {'id': 1}})
        self.assertEqual(output, '/client/1/update/')

    @setup({'url02c': "{% url 'client_action' client.id 'update' %}"})
    def test_url02c(self):
        output = self.engine.render_to_string('url02c', {'client': {'id': 1}})
        self.assertEqual(output, '/client/1/update/')

    @setup({'url03': '{% url "index" %}'})
    def test_url03(self):
        output = self.engine.render_to_string('url03')
        self.assertEqual(output, '/')

    @setup({'url04': '{% url "named.client" client.id %}'})
    def test_url04(self):
        output = self.engine.render_to_string('url04', {'client': {'id': 1}})
        self.assertEqual(output, '/named-client/1/')

    @setup({'url05': '{% url "метка_оператора" v %}'})
    def test_url05(self):
        output = self.engine.render_to_string('url05', {'v': 'Ω'})
        self.assertEqual(output, '/%D0%AE%D0%BD%D0%B8%D0%BA%D0%BE%D0%B4/%CE%A9/')

    @setup({'url06': '{% url "метка_оператора_2" tag=v %}'})
    def test_url06(self):
        output = self.engine.render_to_string('url06', {'v': 'Ω'})
        self.assertEqual(output, '/%D0%AE%D0%BD%D0%B8%D0%BA%D0%BE%D0%B4/%CE%A9/')

    @setup({'url08': '{% url "метка_оператора" v %}'})
    def test_url08(self):
        output = self.engine.render_to_string('url08', {'v': 'Ω'})
        self.assertEqual(output, '/%D0%AE%D0%BD%D0%B8%D0%BA%D0%BE%D0%B4/%CE%A9/')

    @setup({'url09': '{% url "метка_оператора_2" tag=v %}'})
    def test_url09(self):
        output = self.engine.render_to_string('url09', {'v': 'Ω'})
        self.assertEqual(output, '/%D0%AE%D0%BD%D0%B8%D0%BA%D0%BE%D0%B4/%CE%A9/')

    @setup({'url10': '{% url "client_action" id=client.id action="two words" %}'})
    def test_url10(self):
        output = self.engine.render_to_string('url10', {'client': {'id': 1}})
        self.assertEqual(output, '/client/1/two%20words/')

    @setup({'url11': '{% url "client_action" id=client.id action="==" %}'})
    def test_url11(self):
        output = self.engine.render_to_string('url11', {'client': {'id': 1}})
        self.assertEqual(output, '/client/1/==/')

    @setup({'url12': '{% url "client_action" id=client.id action="!$&\'()*+,;=~:@," %}'})
    def test_url12(self):
        output = self.engine.render_to_string('url12', {'client': {'id': 1}})
        self.assertEqual(output, '/client/1/!$&amp;&#x27;()*+,;=~:@,/')

    @setup({'url13': '{% url "client_action" id=client.id action=arg|join:"-" %}'})
    def test_url13(self):
        output = self.engine.render_to_string('url13', {'client': {'id': 1}, 'arg': ['a', 'b']})
        self.assertEqual(output, '/client/1/a-b/')

    @setup({'url14': '{% url "client_action" client.id arg|join:"-" %}'})
    def test_url14(self):
        output = self.engine.render_to_string('url14', {'client': {'id': 1}, 'arg': ['a', 'b']})
        self.assertEqual(output, '/client/1/a-b/')

    @setup({'url15': '{% url "client_action" 12 "test" %}'})
    def test_url15(self):
        output = self.engine.render_to_string('url15')
        self.assertEqual(output, '/client/12/test/')

    @setup({'url18': '{% url "client" "1,2" %}'})
    def test_url18(self):
        output = self.engine.render_to_string('url18')
        self.assertEqual(output, '/client/1,2/')

    @setup({'url19': '{% url named_url client.id %}'})
    def test_url19(self):
        output = self.engine.render_to_string(
            'url19', {'client': {'id': 1}, 'named_url': 'client'}
        )
        self.assertEqual(output, '/client/1/')

    @setup({'url20': '{% url url_name_in_var client.id %}'})
    def test_url20(self):
        output = self.engine.render_to_string('url20', {'client': {'id': 1}, 'url_name_in_var': 'named.client'})
        self.assertEqual(output, '/named-client/1/')

    @setup({'url21': '{% autoescape off %}'
                     '{% url "client_action" id=client.id action="!$&\'()*+,;=~:@," %}'
                     '{% endautoescape %}'})
    def test_url21(self):
        output = self.engine.render_to_string('url21', {'client': {'id': 1}})
        self.assertEqual(output, '/client/1/!$&\'()*+,;=~:@,/')

    # Failures
    @setup({'url-fail01': '{% url %}'})
    def test_url_fail01(self):
        with self.assertRaises(TemplateSyntaxError):
            self.engine.get_template('url-fail01')

    @setup({'url-fail02': '{% url "no_such_view" %}'})
    def test_url_fail02(self):
        with self.assertRaises(NoReverseMatch):
            self.engine.render_to_string('url-fail02')

    @setup({'url-fail03': '{% url "client" %}'})
    def test_url_fail03(self):
        with self.assertRaises(NoReverseMatch):
            self.engine.render_to_string('url-fail03')

    @setup({'url-fail04': '{% url "view" id, %}'})
    def test_url_fail04(self):
        with self.assertRaises(TemplateSyntaxError):
            self.engine.get_template('url-fail04')

    @setup({'url-fail05': '{% url "view" id= %}'})
    def test_url_fail05(self):
        with self.assertRaises(TemplateSyntaxError):
            self.engine.get_template('url-fail05')

    @setup({'url-fail06': '{% url "view" a.id=id %}'})
    def test_url_fail06(self):
        with self.assertRaises(TemplateSyntaxError):
            self.engine.get_template('url-fail06')

    @setup({'url-fail07': '{% url "view" a.id!id %}'})
    def test_url_fail07(self):
        with self.assertRaises(TemplateSyntaxError):
            self.engine.get_template('url-fail07')

    @setup({'url-fail08': '{% url "view" id="unterminatedstring %}'})
    def test_url_fail08(self):
        with self.assertRaises(TemplateSyntaxError):
            self.engine.get_template('url-fail08')

    @setup({'url-fail09': '{% url "view" id=", %}'})
    def test_url_fail09(self):
        with self.assertRaises(TemplateSyntaxError):
            self.engine.get_template('url-fail09')

    @setup({'url-fail11': '{% url named_url %}'})
    def test_url_fail11(self):
        with self.assertRaises(NoReverseMatch):
            self.engine.render_to_string('url-fail11')

    @setup({'url-fail12': '{% url named_url %}'})
    def test_url_fail12(self):
        with self.assertRaises(NoReverseMatch):
            self.engine.render_to_string('url-fail12', {'named_url': 'no_such_view'})

    @setup({'url-fail13': '{% url named_url %}'})
    def test_url_fail13(self):
        with self.assertRaises(NoReverseMatch):
            self.engine.render_to_string('url-fail13', {'named_url': 'template_tests.views.client'})

    @setup({'url-fail14': '{% url named_url id, %}'})
    def test_url_fail14(self):
        with self.assertRaises(TemplateSyntaxError):
            self.engine.render_to_string('url-fail14', {'named_url': 'view'})

    @setup({'url-fail15': '{% url named_url id= %}'})
    def test_url_fail15(self):
        with self.assertRaises(TemplateSyntaxError):
            self.engine.render_to_string('url-fail15', {'named_url': 'view'})

    @setup({'url-fail16': '{% url named_url a.id=id %}'})
    def test_url_fail16(self):
        with self.assertRaises(TemplateSyntaxError):
            self.engine.render_to_string('url-fail16', {'named_url': 'view'})

    @setup({'url-fail17': '{% url named_url a.id!id %}'})
    def test_url_fail17(self):
        with self.assertRaises(TemplateSyntaxError):
            self.engine.render_to_string('url-fail17', {'named_url': 'view'})

    @setup({'url-fail18': '{% url named_url id="unterminatedstring %}'})
    def test_url_fail18(self):
        with self.assertRaises(TemplateSyntaxError):
            self.engine.render_to_string('url-fail18', {'named_url': 'view'})

    @setup({'url-fail19': '{% url named_url id=", %}'})
    def test_url_fail19(self):
        with self.assertRaises(TemplateSyntaxError):
            self.engine.render_to_string('url-fail19', {'named_url': 'view'})

    # {% url ... as var %}
    @setup({'url-asvar01': '{% url "index" as url %}'})
    def test_url_asvar01(self):
        output = self.engine.render_to_string('url-asvar01')
        self.assertEqual(output, '')

    @setup({'url-asvar02': '{% url "index" as url %}{{ url }}'})
    def test_url_asvar02(self):
        output = self.engine.render_to_string('url-asvar02')
        self.assertEqual(output, '/')

    @setup({'url-asvar03': '{% url "no_such_view" as url %}{{ url }}'})
    def test_url_asvar03(self):
        output = self.engine.render_to_string('url-asvar03')
        self.assertEqual(output, '')

    @setup({'url-namespace01': '{% url "app:named.client" 42 %}'})
    def test_url_namespace01(self):
        request = self.request_factory.get('/')
        request.resolver_match = resolve('/ns1/')
        template = self.engine.get_template('url-namespace01')
        context = RequestContext(request)
        output = template.render(context)
        self.assertEqual(output, '/ns1/named-client/42/')

    @setup({'url-namespace02': '{% url "app:named.client" 42 %}'})
    def test_url_namespace02(self):
        request = self.request_factory.get('/')
        request.resolver_match = resolve('/ns2/')
        template = self.engine.get_template('url-namespace02')
        context = RequestContext(request)
        output = template.render(context)
        self.assertEqual(output, '/ns2/named-client/42/')

    @setup({'url-namespace03': '{% url "app:named.client" 42 %}'})
    def test_url_namespace03(self):
        request = self.request_factory.get('/')
        template = self.engine.get_template('url-namespace03')
        context = RequestContext(request)
        output = template.render(context)
        self.assertEqual(output, '/ns2/named-client/42/')

    @setup({'url-namespace-no-current-app': '{% url "app:named.client" 42 %}'})
    def test_url_namespace_no_current_app(self):
        request = self.request_factory.get('/')
        request.resolver_match = resolve('/ns1/')
        request.current_app = None
        template = self.engine.get_template('url-namespace-no-current-app')
        context = RequestContext(request)
        output = template.render(context)
        self.assertEqual(output, '/ns2/named-client/42/')

    @setup({'url-namespace-explicit-current-app': '{% url "app:named.client" 42 %}'})
    def test_url_namespace_explicit_current_app(self):
        request = self.request_factory.get('/')
        request.resolver_match = resolve('/ns1/')
        request.current_app = 'app'
        template = self.engine.get_template('url-namespace-explicit-current-app')
        context = RequestContext(request)
        output = template.render(context)
        self.assertEqual(output, '/ns2/named-client/42/')
bsd-3-clause
argriffing/numpy
numpy/core/tests/test_records.py
5
13700
from __future__ import division, absolute_import, print_function

import sys
import collections
import pickle
from os import path

import numpy as np
from numpy.compat import asbytes
from numpy.testing import (
    TestCase, run_module_suite, assert_, assert_equal, assert_array_equal,
    assert_array_almost_equal, assert_raises
    )


class TestFromrecords(TestCase):
    def test_fromrecords(self):
        r = np.rec.fromrecords([[456, 'dbe', 1.2], [2, 'de', 1.3]],
                               names='col1,col2,col3')
        assert_equal(r[0].item(), (456, 'dbe', 1.2))
        assert_equal(r['col1'].dtype.kind, 'i')
        if sys.version_info[0] >= 3:
            assert_equal(r['col2'].dtype.kind, 'U')
            assert_equal(r['col2'].dtype.itemsize, 12)
        else:
            assert_equal(r['col2'].dtype.kind, 'S')
            assert_equal(r['col2'].dtype.itemsize, 3)
        assert_equal(r['col3'].dtype.kind, 'f')

    def test_method_array(self):
        r = np.rec.array(asbytes('abcdefg') * 100, formats='i2,a3,i4', shape=3,
                         byteorder='big')
        assert_equal(r[1].item(), (25444, asbytes('efg'), 1633837924))

    def test_method_array2(self):
        r = np.rec.array([(1, 11, 'a'), (2, 22, 'b'), (3, 33, 'c'),
                          (4, 44, 'd'), (5, 55, 'ex'), (6, 66, 'f'),
                          (7, 77, 'g')], formats='u1,f4,a1')
        assert_equal(r[1].item(), (2, 22.0, asbytes('b')))

    def test_recarray_slices(self):
        r = np.rec.array([(1, 11, 'a'), (2, 22, 'b'), (3, 33, 'c'),
                          (4, 44, 'd'), (5, 55, 'ex'), (6, 66, 'f'),
                          (7, 77, 'g')], formats='u1,f4,a1')
        assert_equal(r[1::2][1].item(), (4, 44.0, asbytes('d')))

    def test_recarray_fromarrays(self):
        x1 = np.array([1, 2, 3, 4])
        x2 = np.array(['a', 'dd', 'xyz', '12'])
        x3 = np.array([1.1, 2, 3, 4])
        r = np.rec.fromarrays([x1, x2, x3], names='a,b,c')
        assert_equal(r[1].item(), (2, 'dd', 2.0))
        x1[1] = 34
        assert_equal(r.a, np.array([1, 2, 3, 4]))

    def test_recarray_fromfile(self):
        data_dir = path.join(path.dirname(__file__), 'data')
        filename = path.join(data_dir, 'recarray_from_file.fits')
        fd = open(filename, 'rb')
        fd.seek(2880 * 2)
        r1 = np.rec.fromfile(fd, formats='f8,i4,a5', shape=3, byteorder='big')
        fd.seek(2880 * 2)
        r2 = np.rec.array(fd, formats='f8,i4,a5', shape=3, byteorder='big')
        fd.close()
        assert_equal(r1, r2)

    def test_recarray_from_obj(self):
        count = 10
        a = np.zeros(count, dtype='O')
        b = np.zeros(count, dtype='f8')
        c = np.zeros(count, dtype='f8')
        for i in range(len(a)):
            a[i] = list(range(1, 10))

        mine = np.rec.fromarrays([a, b, c], names='date,data1,data2')
        for i in range(len(a)):
            assert_((mine.date[i] == list(range(1, 10))))
            assert_((mine.data1[i] == 0.0))
            assert_((mine.data2[i] == 0.0))

    def test_recarray_from_repr(self):
        a = np.array([(1,'ABC'), (2, "DEF")],
                     dtype=[('foo', int), ('bar', 'S4')])
        recordarr = np.rec.array(a)
        recarr = a.view(np.recarray)
        recordview = a.view(np.dtype((np.record, a.dtype)))

        recordarr_r = eval("numpy." + repr(recordarr), {'numpy': np})
        recarr_r = eval("numpy." + repr(recarr), {'numpy': np})
        recordview_r = eval("numpy." + repr(recordview), {'numpy': np})

        assert_equal(type(recordarr_r), np.recarray)
        assert_equal(recordarr_r.dtype.type, np.record)
        assert_equal(recordarr, recordarr_r)

        assert_equal(type(recarr_r), np.recarray)
        assert_equal(recarr_r.dtype.type, np.record)
        assert_equal(recarr, recarr_r)

        assert_equal(type(recordview_r), np.ndarray)
        assert_equal(recordview.dtype.type, np.record)
        assert_equal(recordview, recordview_r)

    def test_recarray_views(self):
        a = np.array([(1,'ABC'), (2, "DEF")],
                     dtype=[('foo', int), ('bar', 'S4')])
        b = np.array([1,2,3,4,5], dtype=np.int64)

        #check that np.rec.array gives right dtypes
        assert_equal(np.rec.array(a).dtype.type, np.record)
        assert_equal(type(np.rec.array(a)), np.recarray)
        assert_equal(np.rec.array(b).dtype.type, np.int64)
        assert_equal(type(np.rec.array(b)), np.recarray)

        #check that viewing as recarray does the same
        assert_equal(a.view(np.recarray).dtype.type, np.record)
        assert_equal(type(a.view(np.recarray)), np.recarray)
        assert_equal(b.view(np.recarray).dtype.type, np.int64)
        assert_equal(type(b.view(np.recarray)), np.recarray)

        #check that view to non-structured dtype preserves type=np.recarray
        r = np.rec.array(np.ones(4, dtype="f4,i4"))
        rv = r.view('f8').view('f4,i4')
        assert_equal(type(rv), np.recarray)
        assert_equal(rv.dtype.type, np.record)

        #check that getitem also preserves np.recarray and np.record
        r = np.rec.array(np.ones(4, dtype=[('a', 'i4'), ('b', 'i4'),
                                           ('c', 'i4,i4')]))
        assert_equal(r['c'].dtype.type, np.record)
        assert_equal(type(r['c']), np.recarray)
        assert_equal(r[['a', 'b']].dtype.type, np.record)
        assert_equal(type(r[['a', 'b']]), np.recarray)

        #and that it preserves subclasses (gh-6949)
        class C(np.recarray):
            pass

        c = r.view(C)
        assert_equal(type(c['c']), C)

        # check that accessing nested structures keep record type, but
        # not for subarrays, non-void structures, non-structured voids
        test_dtype = [('a', 'f4,f4'), ('b', 'V8'), ('c', ('f4',2)),
                      ('d', ('i8', 'i4,i4'))]
        r = np.rec.array([((1,1), b'11111111', [1,1], 1),
                          ((1,1), b'11111111', [1,1], 1)], dtype=test_dtype)
        assert_equal(r.a.dtype.type, np.record)
        assert_equal(r.b.dtype.type, np.void)
        assert_equal(r.c.dtype.type, np.float32)
        assert_equal(r.d.dtype.type, np.int64)
        # check the same, but for views
        r = np.rec.array(np.ones(4, dtype='i4,i4'))
        assert_equal(r.view('f4,f4').dtype.type, np.record)
        assert_equal(r.view(('i4',2)).dtype.type, np.int32)
        assert_equal(r.view('V8').dtype.type, np.void)
        assert_equal(r.view(('i8', 'i4,i4')).dtype.type, np.int64)

        #check that we can undo the view
        arrs = [np.ones(4, dtype='f4,i4'), np.ones(4, dtype='f8')]
        for arr in arrs:
            rec = np.rec.array(arr)
            # recommended way to view as an ndarray:
            arr2 = rec.view(rec.dtype.fields or rec.dtype, np.ndarray)
            assert_equal(arr2.dtype.type, arr.dtype.type)
            assert_equal(type(arr2), type(arr))

    def test_recarray_repr(self):
        # make sure non-structured dtypes also show up as rec.array
        a = np.array(np.ones(4, dtype='f8'))
        assert_(repr(np.rec.array(a)).startswith('rec.array'))

        # check that the 'np.record' part of the dtype isn't shown
        a = np.rec.array(np.ones(3, dtype='i4,i4'))
        assert_equal(repr(a).find('numpy.record'), -1)
        a = np.rec.array(np.ones(3, dtype='i4'))
        assert_(repr(a).find('dtype=int32') != -1)

    def test_recarray_from_names(self):
        ra = np.rec.array([
            (1, 'abc', 3.7000002861022949, 0),
            (2, 'xy', 6.6999998092651367, 1),
            (0, ' ', 0.40000000596046448, 0)],
            names='c1, c2, c3, c4')
        pa = np.rec.fromrecords([
            (1, 'abc', 3.7000002861022949, 0),
            (2, 'xy', 6.6999998092651367, 1),
            (0, ' ', 0.40000000596046448, 0)],
            names='c1, c2, c3, c4')
        assert_(ra.dtype == pa.dtype)
        assert_(ra.shape == pa.shape)
        for k in range(len(ra)):
            assert_(ra[k].item() == pa[k].item())

    def test_recarray_conflict_fields(self):
        ra = np.rec.array([(1, 'abc', 2.3), (2, 'xyz', 4.2),
                           (3, 'wrs', 1.3)],
                          names='field, shape, mean')
        ra.mean = [1.1, 2.2, 3.3]
        assert_array_almost_equal(ra['mean'], [1.1, 2.2, 3.3])
        assert_(type(ra.mean) is type(ra.var))
        ra.shape = (1, 3)
        assert_(ra.shape == (1, 3))
        ra.shape = ['A', 'B', 'C']
        assert_array_equal(ra['shape'], [['A', 'B', 'C']])
        ra.field = 5
        assert_array_equal(ra['field'], [[5, 5, 5]])
        assert_(isinstance(ra.field, collections.Callable))

    def test_fromrecords_with_explicit_dtype(self):
        a = np.rec.fromrecords([(1, 'a'), (2, 'bbb')],
                               dtype=[('a', int), ('b', np.object)])
        assert_equal(a.a, [1, 2])
        assert_equal(a[0].a, 1)
        assert_equal(a.b, ['a', 'bbb'])
        assert_equal(a[-1].b, 'bbb')
        #
        ndtype = np.dtype([('a', int), ('b', np.object)])
        a = np.rec.fromrecords([(1, 'a'), (2, 'bbb')], dtype=ndtype)
        assert_equal(a.a, [1, 2])
        assert_equal(a[0].a, 1)
        assert_equal(a.b, ['a', 'bbb'])
        assert_equal(a[-1].b, 'bbb')

    def test_recarray_stringtypes(self):
        # Issue #3993
        a = np.array([('abc ', 1), ('abc', 2)],
                     dtype=[('foo', 'S4'), ('bar', int)])
        a = a.view(np.recarray)
        assert_equal(a.foo[0] == a.foo[1], False)

    def test_recarray_returntypes(self):
        qux_fields = {'C': (np.dtype('S5'), 0), 'D': (np.dtype('S5'), 6)}
        a = np.rec.array([('abc ', (1,1), 1, ('abcde', 'fgehi')),
                          ('abc', (2,3), 1, ('abcde', 'jklmn'))],
                         dtype=[('foo', 'S4'),
                                ('bar', [('A', int), ('B', int)]),
                                ('baz', int), ('qux', qux_fields)])
        assert_equal(type(a.foo), np.ndarray)
        assert_equal(type(a['foo']), np.ndarray)
        assert_equal(type(a.bar), np.recarray)
        assert_equal(type(a['bar']), np.recarray)
        assert_equal(a.bar.dtype.type, np.record)
        assert_equal(type(a['qux']), np.recarray)
        assert_equal(a.qux.dtype.type, np.record)
        assert_equal(dict(a.qux.dtype.fields), qux_fields)
        assert_equal(type(a.baz), np.ndarray)
        assert_equal(type(a['baz']), np.ndarray)
        assert_equal(type(a[0].bar), np.record)
        assert_equal(type(a[0]['bar']), np.record)
        assert_equal(a[0].bar.A, 1)
        assert_equal(a[0].bar['A'], 1)
        assert_equal(a[0]['bar'].A, 1)
        assert_equal(a[0]['bar']['A'], 1)
        assert_equal(a[0].qux.D, asbytes('fgehi'))
        assert_equal(a[0].qux['D'], asbytes('fgehi'))
        assert_equal(a[0]['qux'].D, asbytes('fgehi'))
        assert_equal(a[0]['qux']['D'], asbytes('fgehi'))


class TestRecord(TestCase):
    def setUp(self):
        self.data = np.rec.fromrecords([(1, 2, 3), (4, 5, 6)],
                                       dtype=[("col1", "<i4"),
                                              ("col2", "<i4"),
                                              ("col3", "<i4")])

    def test_assignment1(self):
        a = self.data
        assert_equal(a.col1[0], 1)
        a[0].col1 = 0
        assert_equal(a.col1[0], 0)

    def test_assignment2(self):
        a = self.data
        assert_equal(a.col1[0], 1)
        a.col1[0] = 0
        assert_equal(a.col1[0], 0)

    def test_invalid_assignment(self):
        a = self.data

        def assign_invalid_column(x):
            x[0].col5 = 1

        self.assertRaises(AttributeError, assign_invalid_column, a)

    def test_out_of_order_fields(self):
        """Ticket #1431."""
        x = self.data[['col1', 'col2']]
        y = self.data[['col2', 'col1']]
        assert_equal(x[0][0], y[0][1])

    def test_pickle_1(self):
        # Issue #1529
        a = np.array([(1, [])], dtype=[('a', np.int32), ('b', np.int32, 0)])
        assert_equal(a, pickle.loads(pickle.dumps(a)))
        assert_equal(a[0], pickle.loads(pickle.dumps(a[0])))

    def test_pickle_2(self):
        a = self.data
        assert_equal(a, pickle.loads(pickle.dumps(a)))
        assert_equal(a[0], pickle.loads(pickle.dumps(a[0])))

    def test_pickle_3(self):
        # Issue #7140
        a = self.data
        pa = pickle.loads(pickle.dumps(a[0]))
        assert_(pa.flags.c_contiguous)
        assert_(pa.flags.f_contiguous)
        assert_(pa.flags.writeable)
        assert_(pa.flags.aligned)

    def test_objview_record(self):
        # https://github.com/numpy/numpy/issues/2599
        dt = np.dtype([('foo', 'i8'), ('bar', 'O')])
        r = np.zeros((1,3), dtype=dt).view(np.recarray)
        r.foo = np.array([1, 2, 3])  # TypeError?

        # https://github.com/numpy/numpy/issues/3256
        ra = np.recarray((2,), dtype=[('x', object), ('y', float), ('z', int)])
        ra[['x','y']]  # TypeError?

    def test_record_scalar_setitem(self):
        # https://github.com/numpy/numpy/issues/3561
        rec = np.recarray(1, dtype=[('x', float, 5)])
        rec[0].x = 1
        assert_equal(rec[0].x, np.ones(5))

    def test_missing_field(self):
        # https://github.com/numpy/numpy/issues/4806
        arr = np.zeros((3,), dtype=[('x', int), ('y', int)])
        assert_raises(ValueError, lambda: arr[['nofield']])


def test_find_duplicate():
    l1 = [1, 2, 3, 4, 5, 6]
    assert_(np.rec.find_duplicate(l1) == [])

    l2 = [1, 2, 1, 4, 5, 6]
    assert_(np.rec.find_duplicate(l2) == [1])

    l3 = [1, 2, 1, 4, 1, 6, 2, 3]
    assert_(np.rec.find_duplicate(l3) == [1, 2])

    l3 = [2, 2, 1, 4, 1, 6, 2, 3]
    assert_(np.rec.find_duplicate(l3) == [2, 1])


if __name__ == "__main__":
    run_module_suite()
bsd-3-clause
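The test file above exercises the np.recarray / np.record pair: np.rec.array (or a view as np.recarray) gives attribute access to fields, and structured scalars come back as np.record rather than np.void. A minimal sketch of that behavior, assuming a NumPy of roughly the vintage these tests target:

import numpy as np

# np.rec.array wraps a structured array as a recarray: fields become
# attributes and the element scalar type is upgraded to np.record.
a = np.array([(1, 'ABC'), (2, 'DEF')], dtype=[('foo', int), ('bar', 'S4')])
r = np.rec.array(a)

print(type(r))        # <class 'numpy.recarray'>
print(r.dtype.type)   # <class 'numpy.record'>
print(r.foo)          # [1 2] -- field access via attribute
print(type(r[0]))     # <class 'numpy.record'>
print(r[0].bar)       # b'ABC' (plain 'ABC' on Python 2)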
drayanaindra/django-shop
shop/tests/api.py
16
2260
from shop.models.ordermodel import OrderExtraInfo, Order
from django.test.testcases import TestCase
from django.contrib.auth.models import User
from shop.tests.util import Mock
from shop.shop_api import ShopAPI
from decimal import Decimal


class ShopApiTestCase(TestCase):

    def setUp(self):
        self.user = User.objects.create(username="test",
                                        email="[email protected]")

        self.request = Mock()
        setattr(self.request, 'user', None)

        self.order = Order()
        self.order.order_subtotal = Decimal('10.95')
        self.order.order_total = Decimal('10.95')
        self.order.shipping_cost = Decimal('0')

        self.order.shipping_address_text = 'shipping address example'
        self.order.billing_address_text = 'billing address example'

        self.order.save()

    def test_add_extra_info(self):
        api = ShopAPI()
        api.add_extra_info(self.order, 'test')
        # Assert that an OrderExtraInfo item was created
        oei = OrderExtraInfo.objects.get(order=self.order)
        self.assertEqual(oei.text, 'test')

    def test_is_order_paid(self):
        api = ShopAPI()
        # Ensure deprecated method still works
        res = api.is_order_payed(self.order)
        self.assertEqual(res, False)
        res = api.is_order_paid(self.order)
        self.assertEqual(res, False)

    def test_is_order_complete(self):
        api = ShopAPI()
        res = api.is_order_completed(self.order)
        self.assertEqual(res, False)

    def test_get_order_total(self):
        api = ShopAPI()
        res = api.get_order_total(self.order)
        self.assertEqual(res, Decimal('10.95'))

    def test_get_order_subtotal(self):
        api = ShopAPI()
        res = api.get_order_subtotal(self.order)
        self.assertEqual(res, Decimal('10.95'))

    def test_get_order_short_name(self):
        api = ShopAPI()
        res = api.get_order_short_name(self.order)
        self.assertEqual(res, '1-10.95')

    def test_get_order_unique_id(self):
        api = ShopAPI()
        res = api.get_order_unique_id(self.order)
        self.assertEqual(res, 1)

    def test_get_order_for_id(self):
        api = ShopAPI()
        res = api.get_order_for_id(1)
        self.assertEqual(res, self.order)
bsd-3-clause
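The expected values above pin down parts of the ShopAPI contract; for example, test_get_order_short_name expects '1-10.95', i.e. the order's primary key and total joined by a dash. A hypothetical sketch consistent with that expectation (OrderStub and the free function here are illustrative names only, not the real shop.shop_api implementation):

from decimal import Decimal

class OrderStub(object):
    """Stand-in for shop.models.ordermodel.Order (illustrative only)."""
    def __init__(self, pk, order_total):
        self.pk = pk
        self.order_total = order_total

def get_order_short_name(order):
    # Mirrors the '1-10.95' expectation in test_get_order_short_name.
    return '%s-%s' % (order.pk, order.order_total)

print(get_order_short_name(OrderStub(1, Decimal('10.95'))))  # 1-10.95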
rellla/xbmca10
tools/EventClients/lib/python/ps3/sixpair.py
208
2903
#!/usr/bin/python
# -*- coding: utf-8 -*-

import sys
import usb

vendor = 0x054c
product = 0x0268
timeout = 5000
passed_value = 0x03f5


def find_sixaxes():
    res = []
    for bus in usb.busses():
        for dev in bus.devices:
            if dev.idVendor == vendor and dev.idProduct == product:
                res.append(dev)
    return res


def find_interface(dev):
    for cfg in dev.configurations:
        for itf in cfg.interfaces:
            for alt in itf:
                if alt.interfaceClass == 3:
                    return alt
    raise Exception("Unable to find interface")


def mac_to_string(mac):
    return "%02x:%02x:%02x:%02x:%02x:%02x" % (mac[0], mac[1], mac[2],
                                              mac[3], mac[4], mac[5])


def set_pair_filename(dirname, filename, mac):
    for bus in usb.busses():
        if int(bus.dirname) == int(dirname):
            for dev in bus.devices:
                if int(dev.filename) == int(filename):
                    if dev.idVendor == vendor and dev.idProduct == product:
                        update_pair(dev, mac)
                        return
                    else:
                        raise Exception("Device is not a sixaxis")
    raise Exception("Device not found")


def set_pair(dev, mac):
    itf = find_interface(dev)
    handle = dev.open()

    msg = (0x01, 0x00) + mac

    try:
        handle.detachKernelDriver(itf.interfaceNumber)
    except usb.USBError:
        pass

    handle.claimInterface(itf.interfaceNumber)
    try:
        handle.controlMsg(usb.ENDPOINT_OUT | usb.TYPE_CLASS | usb.RECIP_INTERFACE,
                          usb.REQ_SET_CONFIGURATION, msg, passed_value,
                          itf.interfaceNumber, timeout)
    finally:
        handle.releaseInterface()


def get_pair(dev):
    itf = find_interface(dev)
    handle = dev.open()

    try:
        handle.detachKernelDriver(itf.interfaceNumber)
    except usb.USBError:
        pass

    handle.claimInterface(itf.interfaceNumber)
    try:
        msg = handle.controlMsg(usb.ENDPOINT_IN | usb.TYPE_CLASS | usb.RECIP_INTERFACE,
                                usb.REQ_CLEAR_FEATURE, 8, passed_value,
                                itf.interfaceNumber, timeout)
    finally:
        handle.releaseInterface()
    return msg[2:8]


def set_pair_all(mac):
    devs = find_sixaxes()
    for dev in devs:
        update_pair(dev, mac)


def update_pair(dev, mac):
    old = get_pair(dev)
    if old != mac:
        print "Repairing sixaxis from: " + mac_to_string(old) + " to: " + mac_to_string(mac)
        set_pair(dev, mac)


if __name__ == "__main__":
    devs = find_sixaxes()

    mac = None
    if len(sys.argv) > 1:
        try:
            mac = sys.argv[1].split(':')
            mac = tuple([int(x, 16) for x in mac])
            if len(mac) != 6:
                print "Invalid length of HCI address, should be 6 parts"
                mac = None
        except ValueError:
            print "Failed to parse HCI address"
            mac = None

    for dev in devs:
        if mac:
            update_pair(dev, mac)
        else:
            print "Found sixaxis paired to: " + mac_to_string(get_pair(dev))
gpl-2.0
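sixpair.py converts between the colon-separated Bluetooth address given on the command line and the 6-tuple of integers sent in the USB control message. A self-contained sketch of that round trip (parse_bd_addr is a hypothetical helper name; the script does this parsing inline in its __main__ block):

def parse_bd_addr(s):
    # '00:1f:81:00:08:30' -> (0x00, 0x1f, 0x81, 0x00, 0x08, 0x30)
    parts = tuple(int(x, 16) for x in s.split(':'))
    if len(parts) != 6:
        raise ValueError('HCI address should have 6 parts')
    return parts

def mac_to_string(mac):
    # Inverse direction, equivalent to mac_to_string in sixpair.py above.
    return ':'.join('%02x' % b for b in mac)

addr = parse_bd_addr('00:1f:81:00:08:30')
print(mac_to_string(addr))  # 00:1f:81:00:08:30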
stevenaubertin/showsServer
lib/werkzeug/datastructures.py
146
86337
# -*- coding: utf-8 -*- """ werkzeug.datastructures ~~~~~~~~~~~~~~~~~~~~~~~ This module provides mixins and classes with an immutable interface. :copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details. :license: BSD, see LICENSE for more details. """ import re import sys import codecs import mimetypes from copy import deepcopy from itertools import repeat from werkzeug._internal import _missing, _empty_stream from werkzeug._compat import iterkeys, itervalues, iteritems, iterlists, \ PY2, text_type, integer_types, string_types, make_literal_wrapper, \ to_native _locale_delim_re = re.compile(r'[_-]') def is_immutable(self): raise TypeError('%r objects are immutable' % self.__class__.__name__) def iter_multi_items(mapping): """Iterates over the items of a mapping yielding keys and values without dropping any from more complex structures. """ if isinstance(mapping, MultiDict): for item in iteritems(mapping, multi=True): yield item elif isinstance(mapping, dict): for key, value in iteritems(mapping): if isinstance(value, (tuple, list)): for value in value: yield key, value else: yield key, value else: for item in mapping: yield item def native_itermethods(names): if not PY2: return lambda x: x def setmethod(cls, name): itermethod = getattr(cls, name) setattr(cls, 'iter%s' % name, itermethod) listmethod = lambda self, *a, **kw: list(itermethod(self, *a, **kw)) listmethod.__doc__ = \ 'Like :py:meth:`iter%s`, but returns a list.' % name setattr(cls, name, listmethod) def wrap(cls): for name in names: setmethod(cls, name) return cls return wrap class ImmutableListMixin(object): """Makes a :class:`list` immutable. .. versionadded:: 0.5 :private: """ _hash_cache = None def __hash__(self): if self._hash_cache is not None: return self._hash_cache rv = self._hash_cache = hash(tuple(self)) return rv def __reduce_ex__(self, protocol): return type(self), (list(self),) def __delitem__(self, key): is_immutable(self) def __delslice__(self, i, j): is_immutable(self) def __iadd__(self, other): is_immutable(self) __imul__ = __iadd__ def __setitem__(self, key, value): is_immutable(self) def __setslice__(self, i, j, value): is_immutable(self) def append(self, item): is_immutable(self) remove = append def extend(self, iterable): is_immutable(self) def insert(self, pos, value): is_immutable(self) def pop(self, index=-1): is_immutable(self) def reverse(self): is_immutable(self) def sort(self, cmp=None, key=None, reverse=None): is_immutable(self) class ImmutableList(ImmutableListMixin, list): """An immutable :class:`list`. .. versionadded:: 0.5 :private: """ def __repr__(self): return '%s(%s)' % ( self.__class__.__name__, dict.__repr__(self), ) class ImmutableDictMixin(object): """Makes a :class:`dict` immutable. .. 
versionadded:: 0.5 :private: """ _hash_cache = None @classmethod def fromkeys(cls, keys, value=None): instance = super(cls, cls).__new__(cls) instance.__init__(zip(keys, repeat(value))) return instance def __reduce_ex__(self, protocol): return type(self), (dict(self),) def _iter_hashitems(self): return iteritems(self) def __hash__(self): if self._hash_cache is not None: return self._hash_cache rv = self._hash_cache = hash(frozenset(self._iter_hashitems())) return rv def setdefault(self, key, default=None): is_immutable(self) def update(self, *args, **kwargs): is_immutable(self) def pop(self, key, default=None): is_immutable(self) def popitem(self): is_immutable(self) def __setitem__(self, key, value): is_immutable(self) def __delitem__(self, key): is_immutable(self) def clear(self): is_immutable(self) class ImmutableMultiDictMixin(ImmutableDictMixin): """Makes a :class:`MultiDict` immutable. .. versionadded:: 0.5 :private: """ def __reduce_ex__(self, protocol): return type(self), (list(iteritems(self, multi=True)),) def _iter_hashitems(self): return iteritems(self, multi=True) def add(self, key, value): is_immutable(self) def popitemlist(self): is_immutable(self) def poplist(self, key): is_immutable(self) def setlist(self, key, new_list): is_immutable(self) def setlistdefault(self, key, default_list=None): is_immutable(self) class UpdateDictMixin(object): """Makes dicts call `self.on_update` on modifications. .. versionadded:: 0.5 :private: """ on_update = None def calls_update(name): def oncall(self, *args, **kw): rv = getattr(super(UpdateDictMixin, self), name)(*args, **kw) if self.on_update is not None: self.on_update(self) return rv oncall.__name__ = name return oncall def setdefault(self, key, default=None): modified = key not in self rv = super(UpdateDictMixin, self).setdefault(key, default) if modified and self.on_update is not None: self.on_update(self) return rv def pop(self, key, default=_missing): modified = key in self if default is _missing: rv = super(UpdateDictMixin, self).pop(key) else: rv = super(UpdateDictMixin, self).pop(key, default) if modified and self.on_update is not None: self.on_update(self) return rv __setitem__ = calls_update('__setitem__') __delitem__ = calls_update('__delitem__') clear = calls_update('clear') popitem = calls_update('popitem') update = calls_update('update') del calls_update class TypeConversionDict(dict): """Works like a regular dict but the :meth:`get` method can perform type conversions. :class:`MultiDict` and :class:`CombinedMultiDict` are subclasses of this class and provide the same feature. .. versionadded:: 0.5 """ def get(self, key, default=None, type=None): """Return the default value if the requested data doesn't exist. If `type` is provided and is a callable it should convert the value, return it or raise a :exc:`ValueError` if that is not possible. In this case the function will return the default as if the value was not found: >>> d = TypeConversionDict(foo='42', bar='blub') >>> d.get('foo', type=int) 42 >>> d.get('bar', -1, type=int) -1 :param key: The key to be looked up. :param default: The default value to be returned if the key can't be looked up. If not further specified `None` is returned. :param type: A callable that is used to cast the value in the :class:`MultiDict`. If a :exc:`ValueError` is raised by this callable the default value is returned. 
""" try: rv = self[key] if type is not None: rv = type(rv) except (KeyError, ValueError): rv = default return rv class ImmutableTypeConversionDict(ImmutableDictMixin, TypeConversionDict): """Works like a :class:`TypeConversionDict` but does not support modifications. .. versionadded:: 0.5 """ def copy(self): """Return a shallow mutable copy of this object. Keep in mind that the standard library's :func:`copy` function is a no-op for this class like for any other python immutable type (eg: :class:`tuple`). """ return TypeConversionDict(self) def __copy__(self): return self @native_itermethods(['keys', 'values', 'items', 'lists', 'listvalues']) class MultiDict(TypeConversionDict): """A :class:`MultiDict` is a dictionary subclass customized to deal with multiple values for the same key which is for example used by the parsing functions in the wrappers. This is necessary because some HTML form elements pass multiple values for the same key. :class:`MultiDict` implements all standard dictionary methods. Internally, it saves all values for a key as a list, but the standard dict access methods will only return the first value for a key. If you want to gain access to the other values, too, you have to use the `list` methods as explained below. Basic Usage: >>> d = MultiDict([('a', 'b'), ('a', 'c')]) >>> d MultiDict([('a', 'b'), ('a', 'c')]) >>> d['a'] 'b' >>> d.getlist('a') ['b', 'c'] >>> 'a' in d True It behaves like a normal dict thus all dict functions will only return the first value when multiple values for one key are found. From Werkzeug 0.3 onwards, the `KeyError` raised by this class is also a subclass of the :exc:`~exceptions.BadRequest` HTTP exception and will render a page for a ``400 BAD REQUEST`` if caught in a catch-all for HTTP exceptions. A :class:`MultiDict` can be constructed from an iterable of ``(key, value)`` tuples, a dict, a :class:`MultiDict` or from Werkzeug 0.2 onwards some keyword parameters. :param mapping: the initial value for the :class:`MultiDict`. Either a regular dict, an iterable of ``(key, value)`` tuples or `None`. """ def __init__(self, mapping=None): if isinstance(mapping, MultiDict): dict.__init__(self, ((k, l[:]) for k, l in iterlists(mapping))) elif isinstance(mapping, dict): tmp = {} for key, value in iteritems(mapping): if isinstance(value, (tuple, list)): value = list(value) else: value = [value] tmp[key] = value dict.__init__(self, tmp) else: tmp = {} for key, value in mapping or (): tmp.setdefault(key, []).append(value) dict.__init__(self, tmp) def __getstate__(self): return dict(self.lists()) def __setstate__(self, value): dict.clear(self) dict.update(self, value) def __getitem__(self, key): """Return the first data value for this key; raises KeyError if not found. :param key: The key to be looked up. :raise KeyError: if the key does not exist. """ if key in self: return dict.__getitem__(self, key)[0] raise exceptions.BadRequestKeyError(key) def __setitem__(self, key, value): """Like :meth:`add` but removes an existing key first. :param key: the key for the value. :param value: the value to set. """ dict.__setitem__(self, key, [value]) def add(self, key, value): """Adds a new value for the key. .. versionadded:: 0.6 :param key: the key for the value. :param value: the value to add. """ dict.setdefault(self, key, []).append(value) def getlist(self, key, type=None): """Return the list of items for a given key. If that key is not in the `MultiDict`, the return value will be an empty list. Just as `get` `getlist` accepts a `type` parameter. 
All items will be converted with the callable defined there. :param key: The key to be looked up. :param type: A callable that is used to cast the value in the :class:`MultiDict`. If a :exc:`ValueError` is raised by this callable the value will be removed from the list. :return: a :class:`list` of all the values for the key. """ try: rv = dict.__getitem__(self, key) except KeyError: return [] if type is None: return list(rv) result = [] for item in rv: try: result.append(type(item)) except ValueError: pass return result def setlist(self, key, new_list): """Remove the old values for a key and add new ones. Note that the list you pass the values in will be shallow-copied before it is inserted in the dictionary. >>> d = MultiDict() >>> d.setlist('foo', ['1', '2']) >>> d['foo'] '1' >>> d.getlist('foo') ['1', '2'] :param key: The key for which the values are set. :param new_list: An iterable with the new values for the key. Old values are removed first. """ dict.__setitem__(self, key, list(new_list)) def setdefault(self, key, default=None): """Returns the value for the key if it is in the dict, otherwise it returns `default` and sets that value for `key`. :param key: The key to be looked up. :param default: The default value to be returned if the key is not in the dict. If not further specified it's `None`. """ if key not in self: self[key] = default else: default = self[key] return default def setlistdefault(self, key, default_list=None): """Like `setdefault` but sets multiple values. The list returned is not a copy, but the list that is actually used internally. This means that you can put new values into the dict by appending items to the list: >>> d = MultiDict({"foo": 1}) >>> d.setlistdefault("foo").extend([2, 3]) >>> d.getlist("foo") [1, 2, 3] :param key: The key to be looked up. :param default: An iterable of default values. It is either copied (in case it was a list) or converted into a list before returned. :return: a :class:`list` """ if key not in self: default_list = list(default_list or ()) dict.__setitem__(self, key, default_list) else: default_list = dict.__getitem__(self, key) return default_list def items(self, multi=False): """Return an iterator of ``(key, value)`` pairs. :param multi: If set to `True` the iterator returned will have a pair for each value of each key. Otherwise it will only contain pairs for the first value of each key. """ for key, values in iteritems(dict, self): if multi: for value in values: yield key, value else: yield key, values[0] def lists(self): """Return a list of ``(key, values)`` pairs, where values is the list of all values associated with the key.""" for key, values in iteritems(dict, self): yield key, list(values) def keys(self): return iterkeys(dict, self) __iter__ = keys def values(self): """Returns an iterator of the first value on every key's value list.""" for values in itervalues(dict, self): yield values[0] def listvalues(self): """Return an iterator of all values associated with a key. Zipping :meth:`keys` and this is the same as calling :meth:`lists`: >>> d = MultiDict({"foo": [1, 2, 3]}) >>> zip(d.keys(), d.listvalues()) == d.lists() True """ return itervalues(dict, self) def copy(self): """Return a shallow copy of this object.""" return self.__class__(self) def deepcopy(self, memo=None): """Return a deep copy of this object.""" return self.__class__(deepcopy(self.to_dict(flat=False), memo)) def to_dict(self, flat=True): """Return the contents as regular dict. 
If `flat` is `True` the returned dict will only have the first item present, if `flat` is `False` all values will be returned as lists. :param flat: If set to `False` the dict returned will have lists with all the values in it. Otherwise it will only contain the first value for each key. :return: a :class:`dict` """ if flat: return dict(iteritems(self)) return dict(self.lists()) def update(self, other_dict): """update() extends rather than replaces existing key lists.""" for key, value in iter_multi_items(other_dict): MultiDict.add(self, key, value) def pop(self, key, default=_missing): """Pop the first item for a list on the dict. Afterwards the key is removed from the dict, so additional values are discarded: >>> d = MultiDict({"foo": [1, 2, 3]}) >>> d.pop("foo") 1 >>> "foo" in d False :param key: the key to pop. :param default: if provided the value to return if the key was not in the dictionary. """ try: return dict.pop(self, key)[0] except KeyError as e: if default is not _missing: return default raise exceptions.BadRequestKeyError(str(e)) def popitem(self): """Pop an item from the dict.""" try: item = dict.popitem(self) return (item[0], item[1][0]) except KeyError as e: raise exceptions.BadRequestKeyError(str(e)) def poplist(self, key): """Pop the list for a key from the dict. If the key is not in the dict an empty list is returned. .. versionchanged:: 0.5 If the key does no longer exist a list is returned instead of raising an error. """ return dict.pop(self, key, []) def popitemlist(self): """Pop a ``(key, list)`` tuple from the dict.""" try: return dict.popitem(self) except KeyError as e: raise exceptions.BadRequestKeyError(str(e)) def __copy__(self): return self.copy() def __deepcopy__(self, memo): return self.deepcopy(memo=memo) def __repr__(self): return '%s(%r)' % (self.__class__.__name__, list(iteritems(self, multi=True))) class _omd_bucket(object): """Wraps values in the :class:`OrderedMultiDict`. This makes it possible to keep an order over multiple different keys. It requires a lot of extra memory and slows down access a lot, but makes it possible to access elements in O(1) and iterate in O(n). """ __slots__ = ('prev', 'key', 'value', 'next') def __init__(self, omd, key, value): self.prev = omd._last_bucket self.key = key self.value = value self.next = None if omd._first_bucket is None: omd._first_bucket = self if omd._last_bucket is not None: omd._last_bucket.next = self omd._last_bucket = self def unlink(self, omd): if self.prev: self.prev.next = self.next if self.next: self.next.prev = self.prev if omd._first_bucket is self: omd._first_bucket = self.next if omd._last_bucket is self: omd._last_bucket = self.prev @native_itermethods(['keys', 'values', 'items', 'lists', 'listvalues']) class OrderedMultiDict(MultiDict): """Works like a regular :class:`MultiDict` but preserves the order of the fields. To convert the ordered multi dict into a list you can use the :meth:`items` method and pass it ``multi=True``. In general an :class:`OrderedMultiDict` is an order of magnitude slower than a :class:`MultiDict`. .. admonition:: note Due to a limitation in Python you cannot convert an ordered multi dict into a regular dict by using ``dict(multidict)``. Instead you have to use the :meth:`to_dict` method, otherwise the internal bucket objects are exposed. 
""" def __init__(self, mapping=None): dict.__init__(self) self._first_bucket = self._last_bucket = None if mapping is not None: OrderedMultiDict.update(self, mapping) def __eq__(self, other): if not isinstance(other, MultiDict): return NotImplemented if isinstance(other, OrderedMultiDict): iter1 = iteritems(self, multi=True) iter2 = iteritems(other, multi=True) try: for k1, v1 in iter1: k2, v2 = next(iter2) if k1 != k2 or v1 != v2: return False except StopIteration: return False try: next(iter2) except StopIteration: return True return False if len(self) != len(other): return False for key, values in iterlists(self): if other.getlist(key) != values: return False return True def __ne__(self, other): return not self.__eq__(other) def __reduce_ex__(self, protocol): return type(self), (list(iteritems(self, multi=True)),) def __getstate__(self): return list(iteritems(self, multi=True)) def __setstate__(self, values): dict.clear(self) for key, value in values: self.add(key, value) def __getitem__(self, key): if key in self: return dict.__getitem__(self, key)[0].value raise exceptions.BadRequestKeyError(key) def __setitem__(self, key, value): self.poplist(key) self.add(key, value) def __delitem__(self, key): self.pop(key) def keys(self): return (key for key, value in iteritems(self)) __iter__ = keys def values(self): return (value for key, value in iteritems(self)) def items(self, multi=False): ptr = self._first_bucket if multi: while ptr is not None: yield ptr.key, ptr.value ptr = ptr.next else: returned_keys = set() while ptr is not None: if ptr.key not in returned_keys: returned_keys.add(ptr.key) yield ptr.key, ptr.value ptr = ptr.next def lists(self): returned_keys = set() ptr = self._first_bucket while ptr is not None: if ptr.key not in returned_keys: yield ptr.key, self.getlist(ptr.key) returned_keys.add(ptr.key) ptr = ptr.next def listvalues(self): for key, values in iterlists(self): yield values def add(self, key, value): dict.setdefault(self, key, []).append(_omd_bucket(self, key, value)) def getlist(self, key, type=None): try: rv = dict.__getitem__(self, key) except KeyError: return [] if type is None: return [x.value for x in rv] result = [] for item in rv: try: result.append(type(item.value)) except ValueError: pass return result def setlist(self, key, new_list): self.poplist(key) for value in new_list: self.add(key, value) def setlistdefault(self, key, default_list=None): raise TypeError('setlistdefault is unsupported for ' 'ordered multi dicts') def update(self, mapping): for key, value in iter_multi_items(mapping): OrderedMultiDict.add(self, key, value) def poplist(self, key): buckets = dict.pop(self, key, ()) for bucket in buckets: bucket.unlink(self) return [x.value for x in buckets] def pop(self, key, default=_missing): try: buckets = dict.pop(self, key) except KeyError as e: if default is not _missing: return default raise exceptions.BadRequestKeyError(str(e)) for bucket in buckets: bucket.unlink(self) return buckets[0].value def popitem(self): try: key, buckets = dict.popitem(self) except KeyError as e: raise exceptions.BadRequestKeyError(str(e)) for bucket in buckets: bucket.unlink(self) return key, buckets[0].value def popitemlist(self): try: key, buckets = dict.popitem(self) except KeyError as e: raise exceptions.BadRequestKeyError(str(e)) for bucket in buckets: bucket.unlink(self) return key, [x.value for x in buckets] def _options_header_vkw(value, kw): return dump_options_header(value, dict((k.replace('_', '-'), v) for k, v in kw.items())) def 
_unicodify_header_value(value): if isinstance(value, bytes): value = value.decode('latin-1') if not isinstance(value, text_type): value = text_type(value) return value @native_itermethods(['keys', 'values', 'items']) class Headers(object): """An object that stores some headers. It has a dict-like interface but is ordered and can store the same keys multiple times. This data structure is useful if you want a nicer way to handle WSGI headers which are stored as tuples in a list. From Werkzeug 0.3 onwards, the :exc:`KeyError` raised by this class is also a subclass of the :class:`~exceptions.BadRequest` HTTP exception and will render a page for a ``400 BAD REQUEST`` if caught in a catch-all for HTTP exceptions. Headers is mostly compatible with the Python :class:`wsgiref.headers.Headers` class, with the exception of `__getitem__`. :mod:`wsgiref` will return `None` for ``headers['missing']``, whereas :class:`Headers` will raise a :class:`KeyError`. To create a new :class:`Headers` object pass it a list or dict of headers which are used as default values. This does not reuse the list passed to the constructor for internal usage. :param defaults: The list of default values for the :class:`Headers`. .. versionchanged:: 0.9 This data structure now stores unicode values similar to how the multi dicts do it. The main difference is that bytes can be set as well which will automatically be latin1 decoded. .. versionchanged:: 0.9 The :meth:`linked` function was removed without replacement as it was an API that does not support the changes to the encoding model. """ def __init__(self, defaults=None): self._list = [] if defaults is not None: if isinstance(defaults, (list, Headers)): self._list.extend(defaults) else: self.extend(defaults) def __getitem__(self, key, _get_mode=False): if not _get_mode: if isinstance(key, integer_types): return self._list[key] elif isinstance(key, slice): return self.__class__(self._list[key]) if not isinstance(key, string_types): raise exceptions.BadRequestKeyError(key) ikey = key.lower() for k, v in self._list: if k.lower() == ikey: return v # micro optimization: if we are in get mode we will catch that # exception one stack level down so we can raise a standard # key error instead of our special one. if _get_mode: raise KeyError() raise exceptions.BadRequestKeyError(key) def __eq__(self, other): return other.__class__ is self.__class__ and \ set(other._list) == set(self._list) def __ne__(self, other): return not self.__eq__(other) def get(self, key, default=None, type=None, as_bytes=False): """Return the default value if the requested data doesn't exist. If `type` is provided and is a callable it should convert the value, return it or raise a :exc:`ValueError` if that is not possible. In this case the function will return the default as if the value was not found: >>> d = Headers([('Content-Length', '42')]) >>> d.get('Content-Length', type=int) 42 If a headers object is bound you must not add unicode strings because no encoding takes place. .. versionadded:: 0.9 Added support for `as_bytes`. :param key: The key to be looked up. :param default: The default value to be returned if the key can't be looked up. If not further specified `None` is returned. :param type: A callable that is used to cast the value in the :class:`Headers`. If a :exc:`ValueError` is raised by this callable the default value is returned. :param as_bytes: return bytes instead of unicode strings. 
""" try: rv = self.__getitem__(key, _get_mode=True) except KeyError: return default if as_bytes: rv = rv.encode('latin1') if type is None: return rv try: return type(rv) except ValueError: return default def getlist(self, key, type=None, as_bytes=False): """Return the list of items for a given key. If that key is not in the :class:`Headers`, the return value will be an empty list. Just as :meth:`get` :meth:`getlist` accepts a `type` parameter. All items will be converted with the callable defined there. .. versionadded:: 0.9 Added support for `as_bytes`. :param key: The key to be looked up. :param type: A callable that is used to cast the value in the :class:`Headers`. If a :exc:`ValueError` is raised by this callable the value will be removed from the list. :return: a :class:`list` of all the values for the key. :param as_bytes: return bytes instead of unicode strings. """ ikey = key.lower() result = [] for k, v in self: if k.lower() == ikey: if as_bytes: v = v.encode('latin1') if type is not None: try: v = type(v) except ValueError: continue result.append(v) return result def get_all(self, name): """Return a list of all the values for the named field. This method is compatible with the :mod:`wsgiref` :meth:`~wsgiref.headers.Headers.get_all` method. """ return self.getlist(name) def items(self, lower=False): for key, value in self: if lower: key = key.lower() yield key, value def keys(self, lower=False): for key, _ in iteritems(self, lower): yield key def values(self): for _, value in iteritems(self): yield value def extend(self, iterable): """Extend the headers with a dict or an iterable yielding keys and values. """ if isinstance(iterable, dict): for key, value in iteritems(iterable): if isinstance(value, (tuple, list)): for v in value: self.add(key, v) else: self.add(key, value) else: for key, value in iterable: self.add(key, value) def __delitem__(self, key, _index_operation=True): if _index_operation and isinstance(key, (integer_types, slice)): del self._list[key] return key = key.lower() new = [] for k, v in self._list: if k.lower() != key: new.append((k, v)) self._list[:] = new def remove(self, key): """Remove a key. :param key: The key to be removed. """ return self.__delitem__(key, _index_operation=False) def pop(self, key=None, default=_missing): """Removes and returns a key or index. :param key: The key to be popped. If this is an integer the item at that position is removed, if it's a string the value for that key is. If the key is omitted or `None` the last item is removed. :return: an item. """ if key is None: return self._list.pop() if isinstance(key, integer_types): return self._list.pop(key) try: rv = self[key] self.remove(key) except KeyError: if default is not _missing: return default raise return rv def popitem(self): """Removes a key or index and returns a (key, value) item.""" return self.pop() def __contains__(self, key): """Check if a key is present.""" try: self.__getitem__(key, _get_mode=True) except KeyError: return False return True has_key = __contains__ def __iter__(self): """Yield ``(key, value)`` tuples.""" return iter(self._list) def __len__(self): return len(self._list) def add(self, _key, _value, **kw): """Add a new header tuple to the list. Keyword arguments can specify additional parameters for the header value, with underscores converted to dashes:: >>> d = Headers() >>> d.add('Content-Type', 'text/plain') >>> d.add('Content-Disposition', 'attachment', filename='foo.png') The keyword argument dumping uses :func:`dump_options_header` behind the scenes. 
.. versionadded:: 0.4.1 keyword arguments were added for :mod:`wsgiref` compatibility. """ if kw: _value = _options_header_vkw(_value, kw) _value = _unicodify_header_value(_value) self._validate_value(_value) self._list.append((_key, _value)) def _validate_value(self, value): if not isinstance(value, text_type): raise TypeError('Value should be unicode.') if u'\n' in value or u'\r' in value: raise ValueError('Detected newline in header value. This is ' 'a potential security problem') def add_header(self, _key, _value, **_kw): """Add a new header tuple to the list. An alias for :meth:`add` for compatibility with the :mod:`wsgiref` :meth:`~wsgiref.headers.Headers.add_header` method. """ self.add(_key, _value, **_kw) def clear(self): """Clears all headers.""" del self._list[:] def set(self, _key, _value, **kw): """Remove all header tuples for `key` and add a new one. The newly added key either appears at the end of the list if there was no entry or replaces the first one. Keyword arguments can specify additional parameters for the header value, with underscores converted to dashes. See :meth:`add` for more information. .. versionchanged:: 0.6.1 :meth:`set` now accepts the same arguments as :meth:`add`. :param key: The key to be inserted. :param value: The value to be inserted. """ if kw: _value = _options_header_vkw(_value, kw) _value = _unicodify_header_value(_value) self._validate_value(_value) if not self._list: self._list.append((_key, _value)) return listiter = iter(self._list) ikey = _key.lower() for idx, (old_key, old_value) in enumerate(listiter): if old_key.lower() == ikey: # replace first occurrence self._list[idx] = (_key, _value) break else: self._list.append((_key, _value)) return self._list[idx + 1:] = [t for t in listiter if t[0].lower() != ikey] def setdefault(self, key, value): """Returns the value for the key if it is in the dict, otherwise it returns `default` and sets that value for `key`. :param key: The key to be looked up. :param default: The default value to be returned if the key is not in the dict. If not further specified it's `None`. """ if key in self: return self[key] self.set(key, value) return value def __setitem__(self, key, value): """Like :meth:`set` but also supports index/slice based setting.""" if isinstance(key, (slice, integer_types)): if isinstance(key, integer_types): value = [value] value = [(k, _unicodify_header_value(v)) for (k, v) in value] [self._validate_value(v) for (k, v) in value] if isinstance(key, integer_types): self._list[key] = value[0] else: self._list[key] = value else: self.set(key, value) def to_list(self, charset='iso-8859-1'): """Convert the headers into a list suitable for WSGI.""" from warnings import warn warn(DeprecationWarning('Method removed, use to_wsgi_list instead'), stacklevel=2) return self.to_wsgi_list() def to_wsgi_list(self): """Convert the headers into a list suitable for WSGI. The values are byte strings in Python 2 converted to latin1 and unicode strings in Python 3 for the WSGI server to encode. 
:return: list """ if PY2: return [(to_native(k), v.encode('latin1')) for k, v in self] return list(self) def copy(self): return self.__class__(self._list) def __copy__(self): return self.copy() def __str__(self): """Returns formatted headers suitable for HTTP transmission.""" strs = [] for key, value in self.to_wsgi_list(): strs.append('%s: %s' % (key, value)) strs.append('\r\n') return '\r\n'.join(strs) def __repr__(self): return '%s(%r)' % ( self.__class__.__name__, list(self) ) class ImmutableHeadersMixin(object): """Makes a :class:`Headers` immutable. We do not mark them as hashable though since the only usecase for this datastructure in Werkzeug is a view on a mutable structure. .. versionadded:: 0.5 :private: """ def __delitem__(self, key): is_immutable(self) def __setitem__(self, key, value): is_immutable(self) set = __setitem__ def add(self, item): is_immutable(self) remove = add_header = add def extend(self, iterable): is_immutable(self) def insert(self, pos, value): is_immutable(self) def pop(self, index=-1): is_immutable(self) def popitem(self): is_immutable(self) def setdefault(self, key, default): is_immutable(self) class EnvironHeaders(ImmutableHeadersMixin, Headers): """Read only version of the headers from a WSGI environment. This provides the same interface as `Headers` and is constructed from a WSGI environment. From Werkzeug 0.3 onwards, the `KeyError` raised by this class is also a subclass of the :exc:`~exceptions.BadRequest` HTTP exception and will render a page for a ``400 BAD REQUEST`` if caught in a catch-all for HTTP exceptions. """ def __init__(self, environ): self.environ = environ def __eq__(self, other): return self.environ is other.environ def __getitem__(self, key, _get_mode=False): # _get_mode is a no-op for this class as there is no index but # used because get() calls it. key = key.upper().replace('-', '_') if key in ('CONTENT_TYPE', 'CONTENT_LENGTH'): return _unicodify_header_value(self.environ[key]) return _unicodify_header_value(self.environ['HTTP_' + key]) def __len__(self): # the iter is necessary because otherwise list calls our # len which would call list again and so forth. return len(list(iter(self))) def __iter__(self): for key, value in iteritems(self.environ): if key.startswith('HTTP_') and key not in \ ('HTTP_CONTENT_TYPE', 'HTTP_CONTENT_LENGTH'): yield (key[5:].replace('_', '-').title(), _unicodify_header_value(value)) elif key in ('CONTENT_TYPE', 'CONTENT_LENGTH'): yield (key.replace('_', '-').title(), _unicodify_header_value(value)) def copy(self): raise TypeError('cannot create %r copies' % self.__class__.__name__) @native_itermethods(['keys', 'values', 'items', 'lists', 'listvalues']) class CombinedMultiDict(ImmutableMultiDictMixin, MultiDict): """A read only :class:`MultiDict` that you can pass multiple :class:`MultiDict` instances as sequence and it will combine the return values of all wrapped dicts: >>> from werkzeug.datastructures import CombinedMultiDict, MultiDict >>> post = MultiDict([('foo', 'bar')]) >>> get = MultiDict([('blub', 'blah')]) >>> combined = CombinedMultiDict([get, post]) >>> combined['foo'] 'bar' >>> combined['blub'] 'blah' This works for all read operations and will raise a `TypeError` for methods that usually change data which isn't possible. From Werkzeug 0.3 onwards, the `KeyError` raised by this class is also a subclass of the :exc:`~exceptions.BadRequest` HTTP exception and will render a page for a ``400 BAD REQUEST`` if caught in a catch-all for HTTP exceptions. 
""" def __reduce_ex__(self, protocol): return type(self), (self.dicts,) def __init__(self, dicts=None): self.dicts = dicts or [] @classmethod def fromkeys(cls): raise TypeError('cannot create %r instances by fromkeys' % cls.__name__) def __getitem__(self, key): for d in self.dicts: if key in d: return d[key] raise exceptions.BadRequestKeyError(key) def get(self, key, default=None, type=None): for d in self.dicts: if key in d: if type is not None: try: return type(d[key]) except ValueError: continue return d[key] return default def getlist(self, key, type=None): rv = [] for d in self.dicts: rv.extend(d.getlist(key, type)) return rv def keys(self): rv = set() for d in self.dicts: rv.update(d.keys()) return iter(rv) __iter__ = keys def items(self, multi=False): found = set() for d in self.dicts: for key, value in iteritems(d, multi): if multi: yield key, value elif key not in found: found.add(key) yield key, value def values(self): for key, value in iteritems(self): yield value def lists(self): rv = {} for d in self.dicts: for key, values in iterlists(d): rv.setdefault(key, []).extend(values) return iteritems(rv) def listvalues(self): return (x[1] for x in self.lists()) def copy(self): """Return a shallow copy of this object.""" return self.__class__(self.dicts[:]) def to_dict(self, flat=True): """Return the contents as regular dict. If `flat` is `True` the returned dict will only have the first item present, if `flat` is `False` all values will be returned as lists. :param flat: If set to `False` the dict returned will have lists with all the values in it. Otherwise it will only contain the first item for each key. :return: a :class:`dict` """ rv = {} for d in reversed(self.dicts): rv.update(d.to_dict(flat)) return rv def __len__(self): return len(self.keys()) def __contains__(self, key): for d in self.dicts: if key in d: return True return False has_key = __contains__ def __repr__(self): return '%s(%r)' % (self.__class__.__name__, self.dicts) class FileMultiDict(MultiDict): """A special :class:`MultiDict` that has convenience methods to add files to it. This is used for :class:`EnvironBuilder` and generally useful for unittesting. .. versionadded:: 0.5 """ def add_file(self, name, file, filename=None, content_type=None): """Adds a new file to the dict. `file` can be a file name or a :class:`file`-like or a :class:`FileStorage` object. :param name: the name of the field. :param file: a filename or :class:`file`-like object :param filename: an optional filename :param content_type: an optional content type """ if isinstance(file, FileStorage): value = file else: if isinstance(file, string_types): if filename is None: filename = file file = open(file, 'rb') if filename and content_type is None: content_type = mimetypes.guess_type(filename)[0] or \ 'application/octet-stream' value = FileStorage(file, filename, name, content_type) self.add(name, value) class ImmutableDict(ImmutableDictMixin, dict): """An immutable :class:`dict`. .. versionadded:: 0.5 """ def __repr__(self): return '%s(%s)' % ( self.__class__.__name__, dict.__repr__(self), ) def copy(self): """Return a shallow mutable copy of this object. Keep in mind that the standard library's :func:`copy` function is a no-op for this class like for any other python immutable type (eg: :class:`tuple`). """ return dict(self) def __copy__(self): return self class ImmutableMultiDict(ImmutableMultiDictMixin, MultiDict): """An immutable :class:`MultiDict`. .. versionadded:: 0.5 """ def copy(self): """Return a shallow mutable copy of this object. 
Keep in mind that the standard library's :func:`copy` function is a no-op for this class like for any other python immutable type (eg: :class:`tuple`). """ return MultiDict(self) def __copy__(self): return self class ImmutableOrderedMultiDict(ImmutableMultiDictMixin, OrderedMultiDict): """An immutable :class:`OrderedMultiDict`. .. versionadded:: 0.6 """ def _iter_hashitems(self): return enumerate(iteritems(self, multi=True)) def copy(self): """Return a shallow mutable copy of this object. Keep in mind that the standard library's :func:`copy` function is a no-op for this class like for any other python immutable type (eg: :class:`tuple`). """ return OrderedMultiDict(self) def __copy__(self): return self @native_itermethods(['values']) class Accept(ImmutableList): """An :class:`Accept` object is just a list subclass for lists of ``(value, quality)`` tuples. It is automatically sorted by quality. All :class:`Accept` objects work similar to a list but provide extra functionality for working with the data. Containment checks are normalized to the rules of that header: >>> a = CharsetAccept([('ISO-8859-1', 1), ('utf-8', 0.7)]) >>> a.best 'ISO-8859-1' >>> 'iso-8859-1' in a True >>> 'UTF8' in a True >>> 'utf7' in a False To get the quality for an item you can use normal item lookup: >>> print a['utf-8'] 0.7 >>> a['utf7'] 0 .. versionchanged:: 0.5 :class:`Accept` objects are forced immutable now. """ def __init__(self, values=()): if values is None: list.__init__(self) self.provided = False elif isinstance(values, Accept): self.provided = values.provided list.__init__(self, values) else: self.provided = True values = [(a, b) for b, a in values] values.sort() values.reverse() list.__init__(self, [(a, b) for b, a in values]) def _value_matches(self, value, item): """Check if a value matches a given accept item.""" return item == '*' or item.lower() == value.lower() def __getitem__(self, key): """Besides index lookup (getting item n) you can also pass it a string to get the quality for the item. If the item is not in the list, the returned quality is ``0``. """ if isinstance(key, string_types): return self.quality(key) return list.__getitem__(self, key) def quality(self, key): """Returns the quality of the key. .. versionadded:: 0.6 In previous versions you had to use the item-lookup syntax (eg: ``obj[key]`` instead of ``obj.quality(key)``) """ for item, quality in self: if self._value_matches(key, item): return quality return 0 def __contains__(self, value): for item, quality in self: if self._value_matches(value, item): return True return False def __repr__(self): return '%s([%s])' % ( self.__class__.__name__, ', '.join('(%r, %s)' % (x, y) for x, y in self) ) def index(self, key): """Get the position of an entry or raise :exc:`ValueError`. :param key: The key to be looked up. .. versionchanged:: 0.5 This used to raise :exc:`IndexError`, which was inconsistent with the list API. """ if isinstance(key, string_types): for idx, (item, quality) in enumerate(self): if self._value_matches(key, item): return idx raise ValueError(key) return list.index(self, key) def find(self, key): """Get the position of an entry or return -1. :param key: The key to be looked up. 
""" try: return self.index(key) except ValueError: return -1 def values(self): """Iterate over all values.""" for item in self: yield item[0] def to_header(self): """Convert the header set into an HTTP header string.""" result = [] for value, quality in self: if quality != 1: value = '%s;q=%s' % (value, quality) result.append(value) return ','.join(result) def __str__(self): return self.to_header() def best_match(self, matches, default=None): """Returns the best match from a list of possible matches based on the quality of the client. If two items have the same quality, the one is returned that comes first. :param matches: a list of matches to check for :param default: the value that is returned if none match """ best_quality = -1 result = default for server_item in matches: for client_item, quality in self: if quality <= best_quality: break if self._value_matches(server_item, client_item): best_quality = quality result = server_item return result @property def best(self): """The best match as value.""" if self: return self[0][0] class MIMEAccept(Accept): """Like :class:`Accept` but with special methods and behavior for mimetypes. """ def _value_matches(self, value, item): def _normalize(x): x = x.lower() return x == '*' and ('*', '*') or x.split('/', 1) # this is from the application which is trusted. to avoid developer # frustration we actually check these for valid values if '/' not in value: raise ValueError('invalid mimetype %r' % value) value_type, value_subtype = _normalize(value) if value_type == '*' and value_subtype != '*': raise ValueError('invalid mimetype %r' % value) if '/' not in item: return False item_type, item_subtype = _normalize(item) if item_type == '*' and item_subtype != '*': return False return ( (item_type == item_subtype == '*' or value_type == value_subtype == '*') or (item_type == value_type and (item_subtype == '*' or value_subtype == '*' or item_subtype == value_subtype)) ) @property def accept_html(self): """True if this object accepts HTML.""" return ( 'text/html' in self or 'application/xhtml+xml' in self or self.accept_xhtml ) @property def accept_xhtml(self): """True if this object accepts XHTML.""" return ( 'application/xhtml+xml' in self or 'application/xml' in self ) @property def accept_json(self): """True if this object accepts JSON.""" return 'application/json' in self class LanguageAccept(Accept): """Like :class:`Accept` but with normalization for languages.""" def _value_matches(self, value, item): def _normalize(language): return _locale_delim_re.split(language.lower()) return item == '*' or _normalize(value) == _normalize(item) class CharsetAccept(Accept): """Like :class:`Accept` but with normalization for charsets.""" def _value_matches(self, value, item): def _normalize(name): try: return codecs.lookup(name).name except LookupError: return name.lower() return item == '*' or _normalize(value) == _normalize(item) def cache_property(key, empty, type): """Return a new property object for a cache header. Useful if you want to add support for a cache extension in a subclass.""" return property(lambda x: x._get_cache_value(key, empty, type), lambda x, v: x._set_cache_value(key, v, type), lambda x: x._del_cache_value(key), 'accessor for %r' % key) class _CacheControl(UpdateDictMixin, dict): """Subclass of a dict that stores values for a Cache-Control header. It has accessors for all the cache-control directives specified in RFC 2616. The class does not differentiate between request and response directives. 
Because the cache-control directives in the HTTP header use dashes the python descriptors use underscores for that. To get a header of the :class:`CacheControl` object again you can convert the object into a string or call the :meth:`to_header` method. If you plan to subclass it and add your own items have a look at the sourcecode for that class. .. versionchanged:: 0.4 Setting `no_cache` or `private` to boolean `True` will set the implicit none-value which is ``*``: >>> cc = ResponseCacheControl() >>> cc.no_cache = True >>> cc <ResponseCacheControl 'no-cache'> >>> cc.no_cache '*' >>> cc.no_cache = None >>> cc <ResponseCacheControl ''> In versions before 0.5 the behavior documented here affected the now no longer existing `CacheControl` class. """ no_cache = cache_property('no-cache', '*', None) no_store = cache_property('no-store', None, bool) max_age = cache_property('max-age', -1, int) no_transform = cache_property('no-transform', None, None) def __init__(self, values=(), on_update=None): dict.__init__(self, values or ()) self.on_update = on_update self.provided = values is not None def _get_cache_value(self, key, empty, type): """Used internally by the accessor properties.""" if type is bool: return key in self if key in self: value = self[key] if value is None: return empty elif type is not None: try: value = type(value) except ValueError: pass return value def _set_cache_value(self, key, value, type): """Used internally by the accessor properties.""" if type is bool: if value: self[key] = None else: self.pop(key, None) else: if value is None: self.pop(key) elif value is True: self[key] = None else: self[key] = value def _del_cache_value(self, key): """Used internally by the accessor properties.""" if key in self: del self[key] def to_header(self): """Convert the stored values into a cache control header.""" return dump_header(self) def __str__(self): return self.to_header() def __repr__(self): return '<%s %r>' % ( self.__class__.__name__, self.to_header() ) class RequestCacheControl(ImmutableDictMixin, _CacheControl): """A cache control for requests. This is immutable and gives access to all the request-relevant cache control headers. To get a header of the :class:`RequestCacheControl` object again you can convert the object into a string or call the :meth:`to_header` method. If you plan to subclass it and add your own items have a look at the sourcecode for that class. .. versionadded:: 0.5 In previous versions a `CacheControl` class existed that was used both for request and response. """ max_stale = cache_property('max-stale', '*', int) min_fresh = cache_property('min-fresh', '*', int) no_transform = cache_property('no-transform', None, None) only_if_cached = cache_property('only-if-cached', None, bool) class ResponseCacheControl(_CacheControl): """A cache control for responses. Unlike :class:`RequestCacheControl` this is mutable and gives access to response-relevant cache control headers. To get a header of the :class:`ResponseCacheControl` object again you can convert the object into a string or call the :meth:`to_header` method. If you plan to subclass it and add your own items have a look at the sourcecode for that class. .. versionadded:: 0.5 In previous versions a `CacheControl` class existed that was used both for request and response. 
""" public = cache_property('public', None, bool) private = cache_property('private', '*', None) must_revalidate = cache_property('must-revalidate', None, bool) proxy_revalidate = cache_property('proxy-revalidate', None, bool) s_maxage = cache_property('s-maxage', None, None) # attach cache_property to the _CacheControl as staticmethod # so that others can reuse it. _CacheControl.cache_property = staticmethod(cache_property) class CallbackDict(UpdateDictMixin, dict): """A dict that calls a function passed every time something is changed. The function is passed the dict instance. """ def __init__(self, initial=None, on_update=None): dict.__init__(self, initial or ()) self.on_update = on_update def __repr__(self): return '<%s %s>' % ( self.__class__.__name__, dict.__repr__(self) ) class HeaderSet(object): """Similar to the :class:`ETags` class this implements a set-like structure. Unlike :class:`ETags` this is case insensitive and used for vary, allow, and content-language headers. If not constructed using the :func:`parse_set_header` function the instantiation works like this: >>> hs = HeaderSet(['foo', 'bar', 'baz']) >>> hs HeaderSet(['foo', 'bar', 'baz']) """ def __init__(self, headers=None, on_update=None): self._headers = list(headers or ()) self._set = set([x.lower() for x in self._headers]) self.on_update = on_update def add(self, header): """Add a new header to the set.""" self.update((header,)) def remove(self, header): """Remove a header from the set. This raises an :exc:`KeyError` if the header is not in the set. .. versionchanged:: 0.5 In older versions a :exc:`IndexError` was raised instead of a :exc:`KeyError` if the object was missing. :param header: the header to be removed. """ key = header.lower() if key not in self._set: raise KeyError(header) self._set.remove(key) for idx, key in enumerate(self._headers): if key.lower() == header: del self._headers[idx] break if self.on_update is not None: self.on_update(self) def update(self, iterable): """Add all the headers from the iterable to the set. :param iterable: updates the set with the items from the iterable. """ inserted_any = False for header in iterable: key = header.lower() if key not in self._set: self._headers.append(header) self._set.add(key) inserted_any = True if inserted_any and self.on_update is not None: self.on_update(self) def discard(self, header): """Like :meth:`remove` but ignores errors. :param header: the header to be discarded. """ try: return self.remove(header) except KeyError: pass def find(self, header): """Return the index of the header in the set or return -1 if not found. :param header: the header to be looked up. """ header = header.lower() for idx, item in enumerate(self._headers): if item.lower() == header: return idx return -1 def index(self, header): """Return the index of the header in the set or raise an :exc:`IndexError`. :param header: the header to be looked up. """ rv = self.find(header) if rv < 0: raise IndexError(header) return rv def clear(self): """Clear the set.""" self._set.clear() del self._headers[:] if self.on_update is not None: self.on_update(self) def as_set(self, preserve_casing=False): """Return the set as real python set type. When calling this, all the items are converted to lowercase and the ordering is lost. :param preserve_casing: if set to `True` the items in the set returned will have the original case like in the :class:`HeaderSet`, otherwise they will be lowercase. 
""" if preserve_casing: return set(self._headers) return set(self._set) def to_header(self): """Convert the header set into an HTTP header string.""" return ', '.join(map(quote_header_value, self._headers)) def __getitem__(self, idx): return self._headers[idx] def __delitem__(self, idx): rv = self._headers.pop(idx) self._set.remove(rv.lower()) if self.on_update is not None: self.on_update(self) def __setitem__(self, idx, value): old = self._headers[idx] self._set.remove(old.lower()) self._headers[idx] = value self._set.add(value.lower()) if self.on_update is not None: self.on_update(self) def __contains__(self, header): return header.lower() in self._set def __len__(self): return len(self._set) def __iter__(self): return iter(self._headers) def __nonzero__(self): return bool(self._set) def __str__(self): return self.to_header() def __repr__(self): return '%s(%r)' % ( self.__class__.__name__, self._headers ) class ETags(object): """A set that can be used to check if one etag is present in a collection of etags. """ def __init__(self, strong_etags=None, weak_etags=None, star_tag=False): self._strong = frozenset(not star_tag and strong_etags or ()) self._weak = frozenset(weak_etags or ()) self.star_tag = star_tag def as_set(self, include_weak=False): """Convert the `ETags` object into a python set. Per default all the weak etags are not part of this set.""" rv = set(self._strong) if include_weak: rv.update(self._weak) return rv def is_weak(self, etag): """Check if an etag is weak.""" return etag in self._weak def contains_weak(self, etag): """Check if an etag is part of the set including weak and strong tags.""" return self.is_weak(etag) or self.contains(etag) def contains(self, etag): """Check if an etag is part of the set ignoring weak tags. It is also possible to use the ``in`` operator. """ if self.star_tag: return True return etag in self._strong def contains_raw(self, etag): """When passed a quoted tag it will check if this tag is part of the set. If the tag is weak it is checked against weak and strong tags, otherwise strong only.""" etag, weak = unquote_etag(etag) if weak: return self.contains_weak(etag) return self.contains(etag) def to_header(self): """Convert the etags set into a HTTP header string.""" if self.star_tag: return '*' return ', '.join( ['"%s"' % x for x in self._strong] + ['w/"%s"' % x for x in self._weak] ) def __call__(self, etag=None, data=None, include_weak=False): if [etag, data].count(None) != 1: raise TypeError('either tag or data required, but at least one') if etag is None: etag = generate_etag(data) if include_weak: if etag in self._weak: return True return etag in self._strong def __nonzero__(self): return bool(self.star_tag or self._strong or self._weak) def __str__(self): return self.to_header() def __iter__(self): return iter(self._strong) def __contains__(self, etag): return self.contains(etag) def __repr__(self): return '<%s %r>' % (self.__class__.__name__, str(self)) class IfRange(object): """Very simple object that represents the `If-Range` header in parsed form. It will either have neither a etag or date or one of either but never both. .. versionadded:: 0.7 """ def __init__(self, etag=None, date=None): #: The etag parsed and unquoted. Ranges always operate on strong #: etags so the weakness information is not necessary. self.etag = etag #: The date in parsed format or `None`. 
self.date = date def to_header(self): """Converts the object back into an HTTP header.""" if self.date is not None: return http_date(self.date) if self.etag is not None: return quote_etag(self.etag) return '' def __str__(self): return self.to_header() def __repr__(self): return '<%s %r>' % (self.__class__.__name__, str(self)) class Range(object): """Represents a range header. All methods only support bytes as the unit. Multiple ranges are stored, but :meth:`range_for_length` only works when exactly one range is provided. .. versionadded:: 0.7 """ def __init__(self, units, ranges): #: The units of this range. Usually "bytes". self.units = units #: A list of ``(begin, end)`` tuples for the range header provided. #: The ranges are non-inclusive. self.ranges = ranges def range_for_length(self, length): """If the range is for bytes, the length is not None, there is exactly one range, and it is satisfiable, this returns a ``(start, stop)`` tuple; otherwise `None`. """ if self.units != 'bytes' or length is None or len(self.ranges) != 1: return None start, end = self.ranges[0] if end is None: end = length if start < 0: start += length if is_byte_range_valid(start, end, length): return start, min(end, length) def make_content_range(self, length): """Creates a :class:`~werkzeug.datastructures.ContentRange` object from the current range and given content length. """ rng = self.range_for_length(length) if rng is not None: return ContentRange(self.units, rng[0], rng[1], length) def to_header(self): """Converts the object back into an HTTP header.""" ranges = [] for begin, end in self.ranges: if end is None: ranges.append(begin >= 0 and '%s-' % begin or str(begin)) else: ranges.append('%s-%s' % (begin, end - 1)) return '%s=%s' % (self.units, ','.join(ranges)) def __str__(self): return self.to_header() def __repr__(self): return '<%s %r>' % (self.__class__.__name__, str(self)) class ContentRange(object): """Represents the content range header. .. versionadded:: 0.7 """ def __init__(self, units, start, stop, length=None, on_update=None): assert is_byte_range_valid(start, stop, length), \ 'Bad range provided' self.on_update = on_update self.set(start, stop, length, units) def _callback_property(name): def fget(self): return getattr(self, name) def fset(self, value): setattr(self, name, value) if self.on_update is not None: self.on_update(self) return property(fget, fset) #: The units to use, usually "bytes" units = _callback_property('_units') #: The start point of the range or `None`. start = _callback_property('_start') #: The stop point of the range (non-inclusive) or `None`. Can only be #: `None` if start is also `None`. stop = _callback_property('_stop') #: The length of the range or `None`. length = _callback_property('_length') def set(self, start, stop, length=None, units='bytes'): """Simple method to update the ranges.""" assert is_byte_range_valid(start, stop, length), \ 'Bad range provided' self._units = units self._start = start self._stop = stop self._length = length if self.on_update is not None: self.on_update(self) def unset(self): """Sets the units to `None`, which indicates that the header should no longer be used. 
""" self.set(None, None, units=None) def to_header(self): if self.units is None: return '' if self.length is None: length = '*' else: length = self.length if self.start is None: return '%s */%s' % (self.units, length) return '%s %s-%s/%s' % ( self.units, self.start, self.stop - 1, length ) def __nonzero__(self): return self.units is not None __bool__ = __nonzero__ def __str__(self): return self.to_header() def __repr__(self): return '<%s %r>' % (self.__class__.__name__, str(self)) class Authorization(ImmutableDictMixin, dict): """Represents an `Authorization` header sent by the client. You should not create this kind of object yourself but use it when it's returned by the `parse_authorization_header` function. This object is a dict subclass and can be altered by setting dict items but it should be considered immutable as it's returned by the client and not meant for modifications. .. versionchanged:: 0.5 This object became immutable. """ def __init__(self, auth_type, data=None): dict.__init__(self, data or {}) self.type = auth_type username = property(lambda x: x.get('username'), doc=''' The username transmitted. This is set for both basic and digest auth all the time.''') password = property(lambda x: x.get('password'), doc=''' When the authentication type is basic this is the password transmitted by the client, else `None`.''') realm = property(lambda x: x.get('realm'), doc=''' This is the server realm sent back for HTTP digest auth.''') nonce = property(lambda x: x.get('nonce'), doc=''' The nonce the server sent for digest auth, sent back by the client. A nonce should be unique for every 401 response for HTTP digest auth.''') uri = property(lambda x: x.get('uri'), doc=''' The URI from Request-URI of the Request-Line; duplicated because proxies are allowed to change the Request-Line in transit. HTTP digest auth only.''') nc = property(lambda x: x.get('nc'), doc=''' The nonce count value transmitted by clients if a qop-header is also transmitted. HTTP digest auth only.''') cnonce = property(lambda x: x.get('cnonce'), doc=''' If the server sent a qop-header in the ``WWW-Authenticate`` header, the client has to provide this value for HTTP digest auth. See the RFC for more details.''') response = property(lambda x: x.get('response'), doc=''' A string of 32 hex digits computed as defined in RFC 2617, which proves that the user knows a password. Digest auth only.''') opaque = property(lambda x: x.get('opaque'), doc=''' The opaque header from the server returned unchanged by the client. It is recommended that this string be base64 or hexadecimal data. 
Digest auth only.''') @property def qop(self): """Indicates what "quality of protection" the client has applied to the message for HTTP digest auth.""" def on_update(header_set): if not header_set and 'qop' in self: del self['qop'] elif header_set: self['qop'] = header_set.to_header() return parse_set_header(self.get('qop'), on_update) class WWWAuthenticate(UpdateDictMixin, dict): """Provides simple access to `WWW-Authenticate` headers.""" #: list of keys that require quoting in the generated header _require_quoting = frozenset(['domain', 'nonce', 'opaque', 'realm']) def __init__(self, auth_type=None, values=None, on_update=None): dict.__init__(self, values or ()) if auth_type: self['__auth_type__'] = auth_type self.on_update = on_update def set_basic(self, realm='authentication required'): """Clear the auth info and enable basic auth.""" dict.clear(self) dict.update(self, {'__auth_type__': 'basic', 'realm': realm}) if self.on_update: self.on_update(self) def set_digest(self, realm, nonce, qop=('auth',), opaque=None, algorithm=None, stale=False): """Clear the auth info and enable digest auth.""" d = { '__auth_type__': 'digest', 'realm': realm, 'nonce': nonce, 'qop': dump_header(qop) } if stale: d['stale'] = 'TRUE' if opaque is not None: d['opaque'] = opaque if algorithm is not None: d['algorithm'] = algorithm dict.clear(self) dict.update(self, d) if self.on_update: self.on_update(self) def to_header(self): """Convert the stored values into a WWW-Authenticate header.""" d = dict(self) auth_type = d.pop('__auth_type__', None) or 'basic' return '%s %s' % (auth_type.title(), ', '.join([ '%s=%s' % (key, quote_header_value(value, allow_token=key not in self._require_quoting)) for key, value in iteritems(d) ])) def __str__(self): return self.to_header() def __repr__(self): return '<%s %r>' % ( self.__class__.__name__, self.to_header() ) def auth_property(name, doc=None): """A static helper function for subclasses to add extra authentication system properties onto a class:: class FooAuthenticate(WWWAuthenticate): special_realm = auth_property('special_realm') For more information have a look at the sourcecode to see how the regular properties (:attr:`realm` etc.) are implemented. """ def _set_value(self, value): if value is None: self.pop(name, None) else: self[name] = str(value) return property(lambda x: x.get(name), _set_value, doc=doc) def _set_property(name, doc=None): def fget(self): def on_update(header_set): if not header_set and name in self: del self[name] elif header_set: self[name] = header_set.to_header() return parse_set_header(self.get(name), on_update) return property(fget, doc=doc) type = auth_property('__auth_type__', doc=''' The type of the auth mechanism. HTTP currently specifies `Basic` and `Digest`.''') realm = auth_property('realm', doc=''' A string to be displayed to users so they know which username and password to use. This string should contain at least the name of the host performing the authentication and might additionally indicate the collection of users who might have access.''') domain = _set_property('domain', doc=''' A list of URIs that define the protection space. If a URI is an absolute path, it is relative to the canonical root URL of the server being accessed.''') nonce = auth_property('nonce', doc=''' A server-specified data string which should be uniquely generated each time a 401 response is made. 
It is recommended that this string be base64 or hexadecimal data.''') opaque = auth_property('opaque', doc=''' A string of data, specified by the server, which should be returned by the client unchanged in the Authorization header of subsequent requests with URIs in the same protection space. It is recommended that this string be base64 or hexadecimal data.''') algorithm = auth_property('algorithm', doc=''' A string indicating a pair of algorithms used to produce the digest and a checksum. If this is not present it is assumed to be "MD5". If the algorithm is not understood, the challenge should be ignored (and a different one used, if there is more than one).''') qop = _set_property('qop', doc=''' A set of quality-of-privacy directives such as auth and auth-int.''') def _get_stale(self): val = self.get('stale') if val is not None: return val.lower() == 'true' def _set_stale(self, value): if value is None: self.pop('stale', None) else: self['stale'] = value and 'TRUE' or 'FALSE' stale = property(_get_stale, _set_stale, doc=''' A flag, indicating that the previous request from the client was rejected because the nonce value was stale.''') del _get_stale, _set_stale # make auth_property a staticmethod so that subclasses of # `WWWAuthenticate` can use it for new properties. auth_property = staticmethod(auth_property) del _set_property class FileStorage(object): """The :class:`FileStorage` class is a thin wrapper over incoming files. It is used by the request object to represent uploaded files. All the attributes of the wrapper stream are proxied by the file storage so it's possible to do ``storage.read()`` instead of the long form ``storage.stream.read()``. """ def __init__(self, stream=None, filename=None, name=None, content_type=None, content_length=None, headers=None): self.name = name self.stream = stream or _empty_stream # if no filename is provided we can attempt to get the filename # from the stream object passed. There we have to be careful to # skip things like <fdopen>, <stderr> etc. Python marks these # special filenames with angular brackets. if filename is None: filename = getattr(stream, 'name', None) s = make_literal_wrapper(filename) if filename and filename[0] == s('<') and filename[-1] == s('>'): filename = None # On Python 3 we want to make sure the filename is always unicode. # This might not be if the name attribute is bytes due to the # file being opened from the bytes API. if not PY2 and isinstance(filename, bytes): filename = filename.decode(sys.getfilesystemencoding(), 'replace') self.filename = filename if headers is None: headers = Headers() self.headers = headers if content_type is not None: headers['Content-Type'] = content_type if content_length is not None: headers['Content-Length'] = str(content_length) def _parse_content_type(self): if not hasattr(self, '_parsed_content_type'): self._parsed_content_type = \ parse_options_header(self.content_type) @property def content_type(self): """The content-type sent in the header. Usually not available""" return self.headers.get('content-type') @property def content_length(self): """The content-length sent in the header. Usually not available""" return int(self.headers.get('content-length') or 0) @property def mimetype(self): """Like :attr:`content_type` but without parameters (eg, without charset, type etc.). For example if the content type is ``text/html; charset=utf-8`` the mimetype would be ``'text/html'``. .. 
versionadded:: 0.7 """ self._parse_content_type() return self._parsed_content_type[0] @property def mimetype_params(self): """The mimetype parameters as dict. For example if the content type is ``text/html; charset=utf-8`` the params would be ``{'charset': 'utf-8'}``. .. versionadded:: 0.7 """ self._parse_content_type() return self._parsed_content_type[1] def save(self, dst, buffer_size=16384): """Save the file to a destination path or file object. If the destination is a file object you have to close it yourself after the call. The buffer size is the number of bytes held in memory during the copy process. It defaults to 16KB. For secure file saving also have a look at :func:`secure_filename`. :param dst: a filename or open file object the uploaded file is saved to. :param buffer_size: the size of the buffer. This works the same as the `length` parameter of :func:`shutil.copyfileobj`. """ from shutil import copyfileobj close_dst = False if isinstance(dst, string_types): dst = open(dst, 'wb') close_dst = True try: copyfileobj(self.stream, dst, buffer_size) finally: if close_dst: dst.close() def close(self): """Close the underlying file if possible.""" try: self.stream.close() except Exception: pass def __nonzero__(self): return bool(self.filename) def __getattr__(self, name): return getattr(self.stream, name) def __iter__(self): return iter(self.readline, '') def __repr__(self): return '<%s: %r (%r)>' % ( self.__class__.__name__, self.filename, self.content_type ) # circular dependencies from werkzeug.http import dump_options_header, dump_header, generate_etag, \ quote_header_value, parse_set_header, unquote_etag, quote_etag, \ parse_options_header, http_date, is_byte_range_valid from werkzeug import exceptions
apache-2.0
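# Editor's note: a minimal usage sketch for the cache-control, header-set and
# etag structures defined in the file above. It assumes the file is importable
# as `werkzeug.datastructures`; the header names and values are illustrative.
from werkzeug.datastructures import ResponseCacheControl, HeaderSet, ETags

cc = ResponseCacheControl()
cc.no_cache = True            # boolean True stores the implicit none-value
cc.max_age = 3600             # typed property, values pass through int()
header = cc.to_header()       # e.g. 'no-cache, max-age=3600'
assert 'no-cache' in header and 'max-age=3600' in header

hs = HeaderSet(['Accept-Encoding', 'Vary'])
assert 'accept-encoding' in hs    # membership checks are case insensitive
hs.discard('X-Missing')           # unlike remove(), never raises KeyError

tags = ETags(strong_etags=['abc'], weak_etags=['def'])
assert tags.contains('abc') and not tags.contains('def')
assert tags.contains_weak('def')  # weak tags only match the *_weak variants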
moses-palmer/slimit
src/slimit/visitors/ecmavisitor.py
1
12856
############################################################################### # # Copyright (c) 2011 Ruslan Spivak # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. # ############################################################################### __author__ = 'Ruslan Spivak <[email protected]>' from slimit import ast class ECMAVisitor(object): def __init__(self): self.indent_level = 0 def _make_indent(self): return ' ' * self.indent_level def visit(self, node): method = 'visit_%s' % node.__class__.__name__ return getattr(self, method, self.generic_visit)(node) def generic_visit(self, node): return 'GEN: %r' % node def visit_Program(self, node): return '\n'.join(self.visit(child) for child in node) def visit_Block(self, node): s = '{\n' self.indent_level += 2 s += '\n'.join( self._make_indent() + self.visit(child) for child in node) self.indent_level -= 2 s += '\n' + self._make_indent() + '}' return s def visit_VarStatement(self, node): s = 'var %s;' % ', '.join(self.visit(child) for child in node) return s def visit_VarDecl(self, node): output = [] output.append(self.visit(node.identifier)) if node.initializer is not None: output.append(' = %s' % self.visit(node.initializer)) return ''.join(output) def visit_Identifier(self, node): return node.value def visit_Assign(self, node): if node.op == ':': template = '%s%s %s' else: template = '%s %s %s' if getattr(node, '_parens', False): template = '(%s)' % template return template % ( self.visit(node.left), node.op, self.visit(node.right)) def visit_GetPropAssign(self, node): template = 'get %s() {\n%s\n%s}' if getattr(node, '_parens', False): template = '(%s)' % template self.indent_level += 2 body = '\n'.join( (self._make_indent() + self.visit(el)) for el in node.elements ) self.indent_level -= 2 tail = self._make_indent() return template % (self.visit(node.prop_name), body, tail) def visit_SetPropAssign(self, node): template = 'set %s(%s) {\n%s\n%s}' if getattr(node, '_parens', False): template = '(%s)' % template if len(node.parameters) > 1: raise SyntaxError( 'Setter functions must have one argument: %s' % node) params = ','.join(self.visit(param) for param in node.parameters) self.indent_level += 2 body = '\n'.join( (self._make_indent() + self.visit(el)) for el in node.elements ) self.indent_level -= 2 tail = self._make_indent() return template % (self.visit(node.prop_name), params, body, tail) def visit_Number(self, node): return node.value def visit_Comma(self, node): s = '%s, %s' % (self.visit(node.left), self.visit(node.right)) if getattr(node, 
'_parens', False): s = '(' + s + ')' return s def visit_EmptyStatement(self, node): return node.value def visit_If(self, node): s = 'if (' if node.predicate is not None: s += self.visit(node.predicate) s += ') ' s += self.visit(node.consequent) if node.alternative is not None: s += ' else ' s += self.visit(node.alternative) return s def visit_Boolean(self, node): return node.value def visit_For(self, node): s = 'for (' if node.init is not None: s += self.visit(node.init) if node.init is None: s += ' ; ' elif isinstance(node.init, (ast.Assign, ast.Comma, ast.FunctionCall, ast.UnaryOp, ast.Identifier, ast.BinOp, ast.Conditional, ast.Regex, ast.NewExpr)): s += '; ' else: s += ' ' if node.cond is not None: s += self.visit(node.cond) s += '; ' if node.count is not None: s += self.visit(node.count) s += ') ' + self.visit(node.statement) return s def visit_ForIn(self, node): if isinstance(node.item, ast.VarDecl): template = 'for (var %s in %s) ' else: template = 'for (%s in %s) ' s = template % (self.visit(node.item), self.visit(node.iterable)) s += self.visit(node.statement) return s def visit_BinOp(self, node): if getattr(node, '_parens', False): template = '(%s %s %s)' else: template = '%s %s %s' return template % ( self.visit(node.left), node.op, self.visit(node.right)) def visit_UnaryOp(self, node): s = self.visit(node.value) if node.postfix: s += node.op elif node.op in ('delete', 'void', 'typeof'): s = '%s %s' % (node.op, s) else: s = '%s%s' % (node.op, s) if getattr(node, '_parens', False): s = '(%s)' % s return s def visit_ExprStatement(self, node): return '%s;' % self.visit(node.expr) def visit_DoWhile(self, node): s = 'do ' s += self.visit(node.statement) s += ' while (%s);' % self.visit(node.predicate) return s def visit_While(self, node): s = 'while (%s) ' % self.visit(node.predicate) s += self.visit(node.statement) return s def visit_Null(self, node): return 'null' def visit_String(self, node): return node.value def visit_Continue(self, node): if node.identifier is not None: s = 'continue %s;' % self.visit_Identifier(node.identifier) else: s = 'continue;' return s def visit_Break(self, node): if node.identifier is not None: s = 'break %s;' % self.visit_Identifier(node.identifier) else: s = 'break;' return s def visit_Return(self, node): if node.expr is None: return 'return;' else: return 'return %s;' % self.visit(node.expr) def visit_With(self, node): s = 'with (%s) ' % self.visit(node.expr) s += self.visit(node.statement) return s def visit_Label(self, node): s = '%s: %s' % ( self.visit(node.identifier), self.visit(node.statement)) return s def visit_Switch(self, node): s = 'switch (%s) {\n' % self.visit(node.expr) self.indent_level += 2 for case in node.cases: s += self._make_indent() + self.visit_Case(case) if node.default is not None: s += self.visit_Default(node.default) self.indent_level -= 2 s += self._make_indent() + '}' return s def visit_Case(self, node): s = 'case %s:\n' % self.visit(node.expr) self.indent_level += 2 elements = '\n'.join(self._make_indent() + self.visit(element) for element in node.elements) if elements: s += elements + '\n' self.indent_level -= 2 return s def visit_Default(self, node): s = self._make_indent() + 'default:\n' self.indent_level += 2 s += '\n'.join(self._make_indent() + self.visit(element) for element in node.elements) if node.elements is not None: s += '\n' self.indent_level -= 2 return s def visit_Throw(self, node): s = 'throw %s;' % self.visit(node.expr) return s def visit_Debugger(self, node): return '%s;' % node.value def 
visit_Try(self, node): s = 'try ' s += self.visit(node.statements) if node.catch is not None: s += ' ' + self.visit(node.catch) if node.fin is not None: s += ' ' + self.visit(node.fin) return s def visit_Catch(self, node): s = 'catch (%s) %s' % ( self.visit(node.identifier), self.visit(node.elements)) return s def visit_Finally(self, node): s = 'finally %s' % self.visit(node.elements) return s def visit_FuncDecl(self, node): self.indent_level += 2 elements = '\n'.join(self._make_indent() + self.visit(element) for element in node.elements) self.indent_level -= 2 s = 'function %s(%s) {\n%s' % ( self.visit(node.identifier), ', '.join(self.visit(param) for param in node.parameters), elements, ) s += '\n' + self._make_indent() + '}' return s def visit_FuncExpr(self, node): self.indent_level += 2 elements = '\n'.join(self._make_indent() + self.visit(element) for element in node.elements) self.indent_level -= 2 ident = node.identifier ident = '' if ident is None else ' %s' % self.visit(ident) header = 'function%s(%s)' if getattr(node, '_parens', False): header = '(' + header s = (header + ' {\n%s') % ( ident, ', '.join(self.visit(param) for param in node.parameters), elements, ) s += '\n' + self._make_indent() + '}' if getattr(node, '_parens', False): s += ')' return s def visit_Conditional(self, node): if getattr(node, '_parens', False): template = '(%s ? %s : %s)' else: template = '%s ? %s : %s' s = template % ( self.visit(node.predicate), self.visit(node.consequent), self.visit(node.alternative)) return s def visit_Regex(self, node): if getattr(node, '_parens', False): return '(%s)' % node.value else: return node.value def visit_NewExpr(self, node): s = 'new %s(%s)' % ( self.visit(node.identifier), ', '.join(self.visit(arg) for arg in node.args) ) return s def visit_DotAccessor(self, node): if getattr(node, '_parens', False): template = '(%s.%s)' else: template = '%s.%s' left = self.visit(node.node) if isinstance(node.node, ast.Number): left = '(%s)' % left s = template % (left, self.visit(node.identifier)) return s def visit_BracketAccessor(self, node): s = '%s[%s]' % (self.visit(node.node), self.visit(node.expr)) return s def visit_FunctionCall(self, node): s = '%s(%s)' % (self.visit(node.identifier), ', '.join(self.visit(arg) for arg in node.args)) if getattr(node, '_parens', False): s = '(' + s + ')' return s def visit_Object(self, node): s = '{\n' self.indent_level += 2 s += ',\n'.join(self._make_indent() + self.visit(prop) for prop in node.properties) self.indent_level -= 2 if node.properties: s += '\n' s += self._make_indent() + '}' return s def visit_Array(self, node): s = '[' length = len(node.items) - 1 for index, item in enumerate(node.items): if isinstance(item, ast.Elision): s += ',' elif index != length: s += self.visit(item) + ',' else: s += self.visit(item) s += ']' return s def visit_This(self, node): return 'this'
mit
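# Editor's note: a hedged sketch of how the visitor above is typically driven.
# It assumes the `slimit` package is installed and that its parser lives at
# `slimit.parser.Parser`, which this file itself never imports.
from slimit.parser import Parser
from slimit.visitors.ecmavisitor import ECMAVisitor

tree = Parser().parse('var a = 1; function f(x) { return x + a; }')
# visit() dispatches on the node class name (visit_Program, visit_VarDecl,
# ...), so visiting the root node pretty-prints the whole program.
print(ECMAVisitor().visit(tree))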
FedeMPouzols/Savu
savu/plugins/loaders/multi_modal_loaders/i18_loaders/i18xrd_loader.py
1
5937
# Copyright 2014 Diamond Light Source Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ .. module:: i18xrd_loader :platform: Unix :synopsis: A class for loading I18's XRD data .. moduleauthor:: Aaron Parsons <[email protected]> """ from savu.plugins.loaders.multi_modal_loaders.base_i18_multi_modal_loader \ import BaseI18MultiModalLoader from savu.data.data_structures.data_type import FabIO from savu.plugins.utils import register_plugin import h5py import tempfile import logging import math import os import savu.test.test_utils as tu @register_plugin class I18xrdLoader(BaseI18MultiModalLoader): """ A class to load I18's XRD tomography data. :param data_path: Path to the folder containing the \ data. Default: 'Savu/test_data/data/image_test/tiffs'. :param calibration_path: Path to the calibration \ file. Default: "Savu/test_data/data/LaB6_calibration_output.nxs". """ def __init__(self, name='I18xrdLoader'): super(I18xrdLoader, self).__init__(name) def setup(self): """ Define the input nexus file :param path: The full path of the NeXus file to load. :type path: str """ data_obj = self.multi_modal_setup('xrd') scan_pattern = self.parameters['scan_pattern'] frame_dim = range(len(scan_pattern)) shape = [] for pattern in self.parameters['scan_pattern']: if pattern == 'rotation': pattern = 'rotation_angle' shape.append(len(data_obj.meta_data.get_meta_data(pattern))) path = self.get_path('data_path')  # self.parameters['data_path'] data_obj.data = FabIO(path, data_obj, frame_dim, shape=tuple(shape)) # dummy file filename = path.split('/')[-1] + '.h5' data_obj.backing_file = \ h5py.File(tempfile.mkdtemp() + '/' + filename, 'a') data_obj.set_shape(data_obj.data.get_shape()) self.set_motors(data_obj, 'xrd') self.add_patterns_based_on_acquisition(data_obj, 'xrd') self.set_data_reduction_params(data_obj) calibrationfile = h5py.File(self.get_path('calibration_path'), 'r') # let's just make this all in metres and convert for pyfai in the base integrator try: logging.debug('testing the version of the calibration file') det_str = 'entry1/instrument/detector' mData = data_obj.meta_data xpix = calibrationfile[det_str + '/detector_module/fast_pixel_direction'].value*1e-3 # in metres mData.set_meta_data("x_pixel_size", xpix) mData.set_meta_data("beam_center_x", calibrationfile[det_str + '/beam_center_x'].value*1e-3) # in metres mData.set_meta_data("beam_center_y", calibrationfile[det_str + '/beam_center_y'].value*1e-3) # in metres mData.set_meta_data("distance", calibrationfile[det_str + '/distance'].value*1e-3) # in metres mData.set_meta_data("incident_wavelength", calibrationfile['/entry1/calibration_sample/beam' '/incident_wavelength'].value*1e-10) # in metres mData.set_meta_data("yaw", -calibrationfile[det_str + '/transformations/euler_b'].value) # in degrees mData.set_meta_data("roll", calibrationfile[det_str + '/transformations/euler_c'].value-180.0) # in degrees logging.debug('.... 
its the version in DAWN 2.0') except KeyError: try: det_str = 'entry/instrument/detector' mData = data_obj.meta_data xpix = calibrationfile[det_str + '/x_pixel_size'].value * 1e-3 mData.set_meta_data("x_pixel_size", xpix) # in metres mData.set_meta_data("beam_center_x", calibrationfile[det_str + '/beam_center_x'].value*xpix)# in metres mData.set_meta_data("beam_center_y", calibrationfile[det_str + '/beam_center_y'].value*xpix) # in metres mData.set_meta_data("distance", calibrationfile[det_str + '/distance'].value*1e-3) # in metres mData.set_meta_data("incident_wavelength", calibrationfile['/entry/calibration_sample/beam' '/incident_wavelength'].value*1e-10)# in metres orien = calibrationfile[det_str + '/detector_orientation'][...].reshape((3, 3)) yaw = math.degrees(-math.atan2(orien[2, 0], orien[2, 2]))# in degrees roll = math.degrees(-math.atan2(orien[0, 1], orien[1, 1]))# in degrees mData.set_meta_data("yaw", -yaw) mData.set_meta_data("roll", roll) logging.debug('.... its the legacy version pre-DAWN 2.0') except KeyError: logging.warn("We don't know what type of calibration file this is") self.set_data_reduction_params(data_obj) calibrationfile.close() def get_path(self,field): path = self.parameters[field] if path.split(os.sep)[0] == 'Savu': path = tu.get_test_data_path(path.split('/test_data/data')[1]) return path
gpl-3.0
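# Editor's note: a standalone restatement of I18xrdLoader.get_path above, so
# the 'Savu'-prefixed rewrite rule can be exercised without a Savu install.
# The fallback root below is illustrative; the real helper is
# savu.test.test_utils.get_test_data_path.
import os

def resolve_path(path, get_test_data_path=lambda rel: '/opt/savu/test_data/data' + rel):
    # Paths rooted at the literal 'Savu' directory are rewritten to the
    # installed test-data location; absolute user paths pass through verbatim.
    if path.split(os.sep)[0] == 'Savu':
        return get_test_data_path(path.split('/test_data/data')[1])
    return path

print(resolve_path('Savu/test_data/data/LaB6_calibration_output.nxs'))
print(resolve_path('/scratch/run42/calibration.nxs'))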
zitouni/ieee_802-15-4_868-900
examples/usrpN210/tools/crc16.py
7
1996
#!/usr/bin/env python """ Translation from a C code posted to a forum on the Internet. @translator Thomas Schmid """ from array import array def reflect(crc, bitnum): # reflects the lower 'bitnum' bits of 'crc' j=1 crcout=0 for b in range(bitnum): i=1<<(bitnum-1-b) if crc & i: crcout |= j j <<= 1 return crcout def crcbitbybit(p): # bit by bit algorithm with augmented zero bytes. crc = 0 for i in range(len(p)): c = p[i] c = reflect(ord(c), 8) j=0x80 for b in range(16): bit = crc & 0x8000 crc <<= 1 crc &=0xFFFF if c & j: crc |= 1 if bit: crc ^= 0x1021 j>>=1 if j == 0: break for i in range(16): bit = crc & 0x8000 crc <<= 1 if bit: crc ^= 0x1021 crc = reflect(crc, 16) return crc class CRC16(object): """ Class interface, like the Python library's cryptographic hash functions (which CRC's are definitely not.) """ def __init__(self, string=''): self.val = 0 if string: self.update(string) def update(self, string): self.val = crcbitbybit(string) def checksum(self): return chr(self.val >> 8) + chr(self.val & 0xff) def intchecksum(self): return self.val def hexchecksum(self): return '%04x' % self.val def copy(self): clone = CRC16() clone.val = self.val return clone crc = CRC16() #crc.update("123456789") import struct crc.update(struct.pack("20B", 0x1, 0x88, 0xe5, 0xff, 0xff, 0xff, 0xff, 0x10, 0x0, 0x10, 0x0, 0x1, 0x80, 0x80, 0xff, 0xff, 0x10, 0x0, 0x20, 0x0)) assert crc.checksum() == '\x02\x82'
gpl-3.0
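# Editor's note: a small usage sketch for the CRC16 wrapper above. Unlike
# hashlib objects, update() replaces the running value rather than
# accumulating, so feed the complete frame in a single call. No expected
# checksum is asserted here because the value depends on this particular
# reflected polynomial-0x1021 variant.
c = CRC16('123456789')          # the constructor forwards to update()
print(c.hexchecksum())          # four lowercase hex digits
clone = c.copy()
assert clone.intchecksum() == c.intchecksum()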
kelemetry/beacon
vendor/github.com/ugorji/go/codec/test.py
107
4029
#!/usr/bin/env python # This will create golden files in a directory passed to it. # A Test calls this internally to create the golden files # So it can process them (so we don't have to checkin the files). # Ensure msgpack-python and cbor are installed first, using: # sudo apt-get install python-dev # sudo apt-get install python-pip # pip install --user msgpack-python msgpack-rpc-python cbor # Ensure all "string" keys are utf strings (else encoded as bytes) import cbor, msgpack, msgpackrpc, sys, os, threading def get_test_data_list(): # get list with all primitive types, and a combo type l0 = [ -8, -1616, -32323232, -6464646464646464, 192, 1616, 32323232, 6464646464646464, 192, -3232.0, -6464646464.0, 3232.0, 6464.0, 6464646464.0, False, True, u"null", None, u"some&day>some<day", 1328176922000002000, u"", -2206187877999998000, u"bytestring", 270, u"none", -2013855847999995777, #-6795364578871345152, ] l1 = [ { "true": True, "false": False }, { "true": u"True", "false": False, "uint16(1616)": 1616 }, { "list": [1616, 32323232, True, -3232.0, {"TRUE":True, "FALSE":False}, [True, False] ], "int32":32323232, "bool": True, "LONG STRING": u"123456789012345678901234567890123456789012345678901234567890", "SHORT STRING": u"1234567890" }, { True: "true", 138: False, "false": 200 } ] l = [] l.extend(l0) l.append(l0) l.append(1) l.extend(l1) return l def build_test_data(destdir): l = get_test_data_list() for i in range(len(l)): # packer = msgpack.Packer() serialized = msgpack.dumps(l[i]) f = open(os.path.join(destdir, str(i) + '.msgpack.golden'), 'wb') f.write(serialized) f.close() serialized = cbor.dumps(l[i]) f = open(os.path.join(destdir, str(i) + '.cbor.golden'), 'wb') f.write(serialized) f.close() def doRpcServer(port, stopTimeSec): class EchoHandler(object): def Echo123(self, msg1, msg2, msg3): return ("1:%s 2:%s 3:%s" % (msg1, msg2, msg3)) def EchoStruct(self, msg): return ("%s" % msg) addr = msgpackrpc.Address('localhost', port) server = msgpackrpc.Server(EchoHandler()) server.listen(addr) # run thread to stop it after stopTimeSec seconds if > 0 if stopTimeSec > 0: def myStopRpcServer(): server.stop() t = threading.Timer(stopTimeSec, myStopRpcServer) t.start() server.start() def doRpcClientToPythonSvc(port): address = msgpackrpc.Address('localhost', port) client = msgpackrpc.Client(address, unpack_encoding='utf-8') print client.call("Echo123", "A1", "B2", "C3") print client.call("EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"}) def doRpcClientToGoSvc(port): # print ">>>> port: ", port, " <<<<<" address = msgpackrpc.Address('localhost', port) client = msgpackrpc.Client(address, unpack_encoding='utf-8') print client.call("TestRpcInt.Echo123", ["A1", "B2", "C3"]) print client.call("TestRpcInt.EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"}) def doMain(args): if len(args) == 2 and args[0] == "testdata": build_test_data(args[1]) elif len(args) == 3 and args[0] == "rpc-server": doRpcServer(int(args[1]), int(args[2])) elif len(args) == 2 and args[0] == "rpc-client-python-service": doRpcClientToPythonSvc(int(args[1])) elif len(args) == 2 and args[0] == "rpc-client-go-service": doRpcClientToGoSvc(int(args[1])) else: print("Usage: test.py " + "[testdata|rpc-server|rpc-client-python-service|rpc-client-go-service] ...") if __name__ == "__main__": doMain(sys.argv[1:])
apache-2.0
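# Editor's note: a minimal driver for the helpers above, equivalent to running
# `test.py testdata <dir>`; it assumes the msgpack-python and cbor modules are
# installed, as the header comments require.
import os, tempfile

golden_dir = tempfile.mkdtemp()
build_test_data(golden_dir)
# One <index>.msgpack.golden and one <index>.cbor.golden file per test value.
print(sorted(os.listdir(golden_dir)))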
BrainDamage/Flexget
flexget/plugins/search_ptn.py
4
4667
from __future__ import unicode_literals, division, absolute_import import logging from requests.auth import AuthBase from flexget import plugin from flexget.entry import Entry from flexget.event import event from flexget.utils import requests from flexget.utils.imdb import extract_id from flexget.utils.soup import get_soup from flexget.utils.search import torrent_availability log = logging.getLogger('search_ptn') class CookieAuth(AuthBase): def __init__(self, cookies): self.cookies = cookies def __call__(self, r): r.prepare_cookies(self.cookies) return r categories = { '1080p': 'c5', '720p': 'c6', 'bdrip': 'c10', 'bluray': 'c1', 'brrip': 'c11', 'dvdr': 'c4', 'dvdrip': 'c12', 'mp4': 'c16', 'ost/flac': 'c17', 'ost/mp3': 'c18', 'packs': 'c20', 'r5/scr': 'c13', 'remux': 'c2', 'tvrip': 'c15', 'webrip': 'c14' } class SearchPTN(object): schema = { 'type': 'object', 'properties': { 'username': {'type': 'string'}, 'login_key': {'type': 'string'}, 'password': {'type': 'string'}, 'categories': { 'type': 'array', 'items': {'type': 'string', 'enum': list(categories)} } }, 'required': ['username', 'login_key', 'password'], 'additionalProperties': False } def search(self, entry, config): login_sess = requests.Session() login_params = {'username': config['username'], 'password': config['password'], 'loginkey': config['login_key']} try: login_sess.post('https://piratethenet.org/takelogin.php', data=login_params, verify=False) except requests.RequestException as e: log.error('Error while logging in to PtN: %s', e) download_auth = CookieAuth(login_sess.cookies) # Default to searching by title (0=title 3=imdb_id) search_by = 0 if 'imdb_id' in entry: searches = [entry['imdb_id']] search_by = 3 elif 'movie_name' in entry: search = entry['movie_name'] if 'movie_year' in entry: search += ' %s' % entry['movie_year'] searches = [search] else: searches = entry.get('search_strings', [entry['title']]) params = {'_by': search_by} if config.get('categories'): for cat in config['categories']: params[categories[cat]] = 1 results = set() for search in searches: params['search'] = search try: r = login_sess.get('http://piratethenet.org/browse.php', params=params) except requests.RequestException as e: log.error('Error searching ptn: %s' % e) continue soup = get_soup(r.text) if 'login' in soup.head.title.text.lower(): log.error('PtN cookie info invalid') raise plugin.PluginError('PTN cookie info invalid') try: results_table = soup.find_all('table', attrs={'class': 'main'}, limit=2)[1] except IndexError: log.debug('no results found for `%s`' % search) continue for row in results_table.find_all('tr')[1:]: columns = row.find_all('td') entry = Entry() links = columns[1].find_all('a', recursive=False, limit=2) entry['title'] = links[0].text if len(links) > 1: entry['imdb_id'] = extract_id(links[1].get('href')) entry['url'] = 'http://piratethenet.org/' + columns[2].a.get('href') entry['download_auth'] = download_auth entry['torrent_seeds'] = int(columns[8].text) entry['torrent_leeches'] = int(columns[9].text) entry['search_sort'] = torrent_availability(entry['torrent_seeds'], entry['torrent_leeches']) size = columns[6].find('br').previous_sibling unit = columns[6].find('br').next_sibling if unit == 'GB': entry['content_size'] = int(float(size) * 1024) elif unit == 'MB': entry['content_size'] = int(float(size)) elif unit == 'KB': entry['content_size'] = int(float(size) / 1024) results.add(entry) return results @event('plugin.register') def register_plugin(): plugin.register(SearchPTN, 'ptn', groups=['search'], api_ver=2)
mit
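# Editor's note: the size handling at the end of SearchPTN.search normalizes
# every torrent size to mebibytes; this standalone restatement (the helper
# name is illustrative, not part of the plugin) makes the branches testable.
def size_to_mb(size_text, unit):
    if unit == 'GB':
        return int(float(size_text) * 1024)
    if unit == 'MB':
        return int(float(size_text))
    if unit == 'KB':
        return int(float(size_text) / 1024)
    raise ValueError('unknown size unit: %r' % unit)

assert size_to_mb('1.5', 'GB') == 1536
assert size_to_mb('700', 'MB') == 700
assert size_to_mb('512', 'KB') == 0   # sub-megabyte sizes truncate to zero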
map222/spark
examples/src/main/python/ml/n_gram_example.py
123
1545
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from __future__ import print_function # $example on$ from pyspark.ml.feature import NGram # $example off$ from pyspark.sql import SparkSession if __name__ == "__main__": spark = SparkSession\ .builder\ .appName("NGramExample")\ .getOrCreate() # $example on$ wordDataFrame = spark.createDataFrame([ (0, ["Hi", "I", "heard", "about", "Spark"]), (1, ["I", "wish", "Java", "could", "use", "case", "classes"]), (2, ["Logistic", "regression", "models", "are", "neat"]) ], ["id", "words"]) ngram = NGram(n=2, inputCol="words", outputCol="ngrams") ngramDataFrame = ngram.transform(wordDataFrame) ngramDataFrame.select("ngrams").show(truncate=False) # $example off$ spark.stop()
apache-2.0
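# Editor's note: for orientation, NGram joins each sliding window of n tokens
# with a single space, so the first row above comes back as bigrams like
# "Hi I", "I heard", .... A pure-Python sketch of the same windowing:
def ngrams(words, n=2):
    return [' '.join(words[i:i + n]) for i in range(len(words) - n + 1)]

assert ngrams(['Hi', 'I', 'heard', 'about', 'Spark']) == \
    ['Hi I', 'I heard', 'heard about', 'about Spark']
assert ngrams(['Logistic', 'regression', 'models'], n=3) == \
    ['Logistic regression models']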
t0in4/django
tests/check_framework/test_multi_db.py
191
1682
from django.db import connections, models from django.test import TestCase, mock from django.test.utils import override_settings from .tests import IsolateModelsMixin class TestRouter(object): """ Routes to the 'other' database if the model name starts with 'Other'. """ def allow_migrate(self, db, app_label, model=None, **hints): return db == ('other' if model._meta.verbose_name.startswith('other') else 'default') @override_settings(DATABASE_ROUTERS=[TestRouter()]) class TestMultiDBChecks(IsolateModelsMixin, TestCase): multi_db = True def _patch_check_field_on(self, db): return mock.patch.object(connections[db].validation, 'check_field') def test_checks_called_on_the_default_database(self): class Model(models.Model): field = models.CharField(max_length=100) model = Model() with self._patch_check_field_on('default') as mock_check_field_default: with self._patch_check_field_on('other') as mock_check_field_other: model.check() self.assertTrue(mock_check_field_default.called) self.assertFalse(mock_check_field_other.called) def test_checks_called_on_the_other_database(self): class OtherModel(models.Model): field = models.CharField(max_length=100) model = OtherModel() with self._patch_check_field_on('other') as mock_check_field_other: with self._patch_check_field_on('default') as mock_check_field_default: model.check() self.assertTrue(mock_check_field_other.called) self.assertFalse(mock_check_field_default.called)
bsd-3-clause
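# Editor's note: TestRouter's rule, restated without Django so the decision is
# easy to eyeball; the Fake* stand-ins are illustrative and not part of the
# test suite. Models whose verbose_name starts with 'other' migrate on the
# 'other' alias, everything else on 'default'.
class FakeMeta(object):
    def __init__(self, verbose_name):
        self.verbose_name = verbose_name

class FakeModel(object):
    def __init__(self, verbose_name):
        self._meta = FakeMeta(verbose_name)

router = TestRouter()
assert router.allow_migrate('other', 'app', model=FakeModel('other model'))
assert not router.allow_migrate('default', 'app', model=FakeModel('other model'))
assert router.allow_migrate('default', 'app', model=FakeModel('model'))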
Ervii/garage-time
garage/src/python/pants/backend/jvm/tasks/jvm_compile/anonymizer.py
2
4846
# coding=utf-8 # Copyright 2014 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). from __future__ import (nested_scopes, generators, division, absolute_import, with_statement, print_function, unicode_literals) import base64 import os import random import re _default_keep_words = [ 'AAAAAAAAAAA=', 'analysis', 'anonfun', 'apply', 'beta', 'class', 'classes', 'com', 'd', 'home', 'jar', 'jars', 'java', 'javac', 'jvm', 'lib', 'library', 'pants', 'rt', 'scala', 'scalac', 'src', 'unapply', 'users', 'web' ] _default_word_map = { 'foursquare': 'acme', 'benjy': 'kermit' } # TODO: Move somewhere more general? Could also be used to anonymize source files. class Anonymizer(object): """Anonymizes names in analysis files. Will replace all words in word_map with the corresponding value. Will replace all other words with a random word from word_list, except for words in keep. Replacements are 1:1, and therefore invertible. Useful for obfuscating real-life analysis files so we can use them in tests without leaking proprietary information. """ # Utility method for anonymizing base64-encoded binary data in analysis files. @staticmethod def _random_base64_string(): n = random.randint(20, 200) return base64.b64encode(os.urandom(n)) # Break on delimiters (digits, space, forward slash, dash, underscore, dollar, period) and on # upper-case letters. _DELIMITER = r'\d|\s|/|-|_|\$|\.' _UPPER = r'[A-Z]' _UPPER_CASE_RE = re.compile(r'^%s$' % _UPPER) _DELIMITER_RE = re.compile(r'^%s$' % _DELIMITER) _BREAK_ON_RE = re.compile(r'(%s|%s)' % (_DELIMITER, _UPPER)) # Capture what we broke on. # Valid replacement words must be all lower-case letters, with no apostrophes etc. _WORD_RE = re.compile(r'^[a-z]+$') def __init__(self, word_list, word_map=None, keep=None, strict=False): self._translations = {} self._reverse_translations = {} # Init from args. for k, v in (_default_word_map if word_map is None else word_map).items(): self._add_translation(k, v) for w in _default_keep_words if keep is None else keep: self._add_translation(w, w) # Prepare list of candidate translations. self._unused_words = list( set(filter(Anonymizer._WORD_RE.match, word_list)) - set(self._translations.values()) - set(self._translations.keys())) random.shuffle(self._unused_words) self._strict = strict # If we're not strict and we run out of replacement words, we count how many more words # we need, so we can give a useful error message to that effect. self._words_needed = 0 def words_needed(self): return self._words_needed def check_for_comprehensiveness(self): if self._words_needed: raise Exception('Need %d more words in word_list for full anonymization.' % self._words_needed) def convert(self, s): parts = Anonymizer._BREAK_ON_RE.split(s) parts_iter = iter(parts) converted_parts = [] for part in parts_iter: if part == '' or Anonymizer._DELIMITER_RE.match(part): converted_parts.append(part) elif Anonymizer._UPPER_CASE_RE.match(part): # Join to the rest of the word, if any. 
token = part try: token += parts_iter.next() except StopIteration: pass converted_parts.append(self._convert_single_token(token)) else: converted_parts.append(self._convert_single_token(part)) return ''.join(converted_parts) def convert_base64_string(self, s): translation = self._translations.get(s) if translation is None: translation = Anonymizer._random_base64_string() self._add_translation(s, translation) return translation def _convert_single_token(self, token): lower = token.lower() translation = self._translations.get(lower) if translation is None: if not self._unused_words: if self._strict: raise Exception('Ran out of words to translate to.') else: self._words_needed += 1 translation = lower else: translation = self._unused_words.pop() self._add_translation(lower, translation) # Use the same capitalization as the original word. if token[0].isupper(): return translation.capitalize() else: return translation def _add_translation(self, frm, to): if frm in self._translations: raise Exception('Word already has translation: %s -> %s' % (frm, self._translations[frm])) if to in self._reverse_translations: raise Exception('Translation target already used: %s -> %s' % (self._reverse_translations[to], to)) self._translations[frm] = to self._reverse_translations[to] = frm
apache-2.0
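# Editor's note: a hedged usage sketch for Anonymizer. The fruit word list is
# illustrative; exact output varies per run because candidate words are
# shuffled, but the word_map entry 'foursquare' -> 'acme' and the kept word
# 'src' are deterministic.
import random

random.seed(0)  # only to make this sketch repeatable
anon = Anonymizer(['apple', 'pear', 'plum', 'grape', 'melon'], strict=True)
print(anon.convert('FoursquareWidget_v2/src'))  # e.g. 'AcmePear_plum2/src'
anon.check_for_comprehensiveness()  # raises only if non-strict mode ran dry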
llooker/python_sdk
lookerapi/models/lookml_model_explore.py
1
28033
# coding: utf-8 """ Looker API 3.0 Reference ### Authorization The Looker API uses Looker **API3** credentials for authorization and access control. Looker admins can create API3 credentials on Looker's **Admin/Users** page. Pass API3 credentials to the **/login** endpoint to obtain a temporary access_token. Include that access_token in the Authorization header of Looker API requests. For details, see [Looker API Authorization](https://looker.com/docs/r/api/authorization) ### Client SDKs The Looker API is a RESTful system that should be usable by any programming language capable of making HTTPS requests. Client SDKs for a variety of programming languages can be generated from the Looker API's Swagger JSON metadata to streamline use of the Looker API in your applications. A client SDK for Ruby is available as an example. For more information, see [Looker API Client SDKs](https://looker.com/docs/r/api/client_sdks) ### Try It Out! The 'api-docs' page served by the Looker instance includes 'Try It Out!' buttons for each API method. After logging in with API3 credentials, you can use the \"Try It Out!\" buttons to call the API directly from the documentation page to interactively explore API features and responses. ### Versioning Future releases of Looker will expand this API release-by-release to securely expose more and more of the core power of Looker to API client applications. API endpoints marked as \"beta\" may receive breaking changes without warning. Stable (non-beta) API endpoints should not receive breaking changes in future releases. For more information, see [Looker API Versioning](https://looker.com/docs/r/api/versioning) OpenAPI spec version: 3.0.0 Generated by: https://github.com/swagger-api/swagger-codegen.git """ from pprint import pformat from six import iteritems import re class LookmlModelExplore(object): """ NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. """ def __init__(self, id=None, name=None, description=None, label=None, scopes=None, can_total=None, can_save=None, can_explain=None, can_pivot_in_db=None, has_timezone_support=None, supports_cost_estimate=None, connection_name=None, null_sort_treatment=None, files=None, source_file=None, project_name=None, model_name=None, view_name=None, hidden=None, sql_table_name=None, access_filter_fields=None, access_filters=None, aliases=None, always_filter=None, conditionally_filter=None, index_fields=None, sets=None, errors=None, fields=None, joins=None, group_label=None, supported_measure_types=None): """ LookmlModelExplore - a model defined in Swagger :param dict swaggerTypes: The key is attribute name and the value is attribute type. :param dict attributeMap: The key is attribute name and the value is json key in definition. 
""" self.swagger_types = { 'id': 'str', 'name': 'str', 'description': 'str', 'label': 'str', 'scopes': 'list[str]', 'can_total': 'bool', 'can_save': 'bool', 'can_explain': 'bool', 'can_pivot_in_db': 'bool', 'has_timezone_support': 'bool', 'supports_cost_estimate': 'bool', 'connection_name': 'str', 'null_sort_treatment': 'str', 'files': 'list[str]', 'source_file': 'str', 'project_name': 'str', 'model_name': 'str', 'view_name': 'str', 'hidden': 'bool', 'sql_table_name': 'str', 'access_filter_fields': 'list[str]', 'access_filters': 'list[LookmlModelExploreAccessFilter]', 'aliases': 'list[LookmlModelExploreAlias]', 'always_filter': 'list[LookmlModelExploreAlwaysFilter]', 'conditionally_filter': 'list[LookmlModelExploreConditionallyFilter]', 'index_fields': 'list[str]', 'sets': 'list[LookmlModelExploreSet]', 'errors': 'list[LookmlModelExploreError]', 'fields': 'LookmlModelExploreFieldset', 'joins': 'list[LookmlModelExploreJoins]', 'group_label': 'str', 'supported_measure_types': 'list[LookmlModelExploreSupportedMeasureType]' } self.attribute_map = { 'id': 'id', 'name': 'name', 'description': 'description', 'label': 'label', 'scopes': 'scopes', 'can_total': 'can_total', 'can_save': 'can_save', 'can_explain': 'can_explain', 'can_pivot_in_db': 'can_pivot_in_db', 'has_timezone_support': 'has_timezone_support', 'supports_cost_estimate': 'supports_cost_estimate', 'connection_name': 'connection_name', 'null_sort_treatment': 'null_sort_treatment', 'files': 'files', 'source_file': 'source_file', 'project_name': 'project_name', 'model_name': 'model_name', 'view_name': 'view_name', 'hidden': 'hidden', 'sql_table_name': 'sql_table_name', 'access_filter_fields': 'access_filter_fields', 'access_filters': 'access_filters', 'aliases': 'aliases', 'always_filter': 'always_filter', 'conditionally_filter': 'conditionally_filter', 'index_fields': 'index_fields', 'sets': 'sets', 'errors': 'errors', 'fields': 'fields', 'joins': 'joins', 'group_label': 'group_label', 'supported_measure_types': 'supported_measure_types' } self._id = id self._name = name self._description = description self._label = label self._scopes = scopes self._can_total = can_total self._can_save = can_save self._can_explain = can_explain self._can_pivot_in_db = can_pivot_in_db self._has_timezone_support = has_timezone_support self._supports_cost_estimate = supports_cost_estimate self._connection_name = connection_name self._null_sort_treatment = null_sort_treatment self._files = files self._source_file = source_file self._project_name = project_name self._model_name = model_name self._view_name = view_name self._hidden = hidden self._sql_table_name = sql_table_name self._access_filter_fields = access_filter_fields self._access_filters = access_filters self._aliases = aliases self._always_filter = always_filter self._conditionally_filter = conditionally_filter self._index_fields = index_fields self._sets = sets self._errors = errors self._fields = fields self._joins = joins self._group_label = group_label self._supported_measure_types = supported_measure_types @property def id(self): """ Gets the id of this LookmlModelExplore. Fully qualified name model plus explore name :return: The id of this LookmlModelExplore. :rtype: str """ return self._id @id.setter def id(self, id): """ Sets the id of this LookmlModelExplore. Fully qualified name model plus explore name :param id: The id of this LookmlModelExplore. :type: str """ self._id = id @property def name(self): """ Gets the name of this LookmlModelExplore. 
Explore name :return: The name of this LookmlModelExplore. :rtype: str """ return self._name @name.setter def name(self, name): """ Sets the name of this LookmlModelExplore. Explore name :param name: The name of this LookmlModelExplore. :type: str """ self._name = name @property def description(self): """ Gets the description of this LookmlModelExplore. Description :return: The description of this LookmlModelExplore. :rtype: str """ return self._description @description.setter def description(self, description): """ Sets the description of this LookmlModelExplore. Description :param description: The description of this LookmlModelExplore. :type: str """ self._description = description @property def label(self): """ Gets the label of this LookmlModelExplore. Label :return: The label of this LookmlModelExplore. :rtype: str """ return self._label @label.setter def label(self, label): """ Sets the label of this LookmlModelExplore. Label :param label: The label of this LookmlModelExplore. :type: str """ self._label = label @property def scopes(self): """ Gets the scopes of this LookmlModelExplore. Scopes :return: The scopes of this LookmlModelExplore. :rtype: list[str] """ return self._scopes @scopes.setter def scopes(self, scopes): """ Sets the scopes of this LookmlModelExplore. Scopes :param scopes: The scopes of this LookmlModelExplore. :type: list[str] """ self._scopes = scopes @property def can_total(self): """ Gets the can_total of this LookmlModelExplore. Can Total :return: The can_total of this LookmlModelExplore. :rtype: bool """ return self._can_total @can_total.setter def can_total(self, can_total): """ Sets the can_total of this LookmlModelExplore. Can Total :param can_total: The can_total of this LookmlModelExplore. :type: bool """ self._can_total = can_total @property def can_save(self): """ Gets the can_save of this LookmlModelExplore. Can Save :return: The can_save of this LookmlModelExplore. :rtype: bool """ return self._can_save @can_save.setter def can_save(self, can_save): """ Sets the can_save of this LookmlModelExplore. Can Save :param can_save: The can_save of this LookmlModelExplore. :type: bool """ self._can_save = can_save @property def can_explain(self): """ Gets the can_explain of this LookmlModelExplore. Can Explain :return: The can_explain of this LookmlModelExplore. :rtype: bool """ return self._can_explain @can_explain.setter def can_explain(self, can_explain): """ Sets the can_explain of this LookmlModelExplore. Can Explain :param can_explain: The can_explain of this LookmlModelExplore. :type: bool """ self._can_explain = can_explain @property def can_pivot_in_db(self): """ Gets the can_pivot_in_db of this LookmlModelExplore. Can pivot in the DB :return: The can_pivot_in_db of this LookmlModelExplore. :rtype: bool """ return self._can_pivot_in_db @can_pivot_in_db.setter def can_pivot_in_db(self, can_pivot_in_db): """ Sets the can_pivot_in_db of this LookmlModelExplore. Can pivot in the DB :param can_pivot_in_db: The can_pivot_in_db of this LookmlModelExplore. :type: bool """ self._can_pivot_in_db = can_pivot_in_db @property def has_timezone_support(self): """ Gets the has_timezone_support of this LookmlModelExplore. Has timezone support :return: The has_timezone_support of this LookmlModelExplore. :rtype: bool """ return self._has_timezone_support @has_timezone_support.setter def has_timezone_support(self, has_timezone_support): """ Sets the has_timezone_support of this LookmlModelExplore. 
Has timezone support :param has_timezone_support: The has_timezone_support of this LookmlModelExplore. :type: bool """ self._has_timezone_support = has_timezone_support @property def supports_cost_estimate(self): """ Gets the supports_cost_estimate of this LookmlModelExplore. Cost estimates supported :return: The supports_cost_estimate of this LookmlModelExplore. :rtype: bool """ return self._supports_cost_estimate @supports_cost_estimate.setter def supports_cost_estimate(self, supports_cost_estimate): """ Sets the supports_cost_estimate of this LookmlModelExplore. Cost estimates supported :param supports_cost_estimate: The supports_cost_estimate of this LookmlModelExplore. :type: bool """ self._supports_cost_estimate = supports_cost_estimate @property def connection_name(self): """ Gets the connection_name of this LookmlModelExplore. Connection name :return: The connection_name of this LookmlModelExplore. :rtype: str """ return self._connection_name @connection_name.setter def connection_name(self, connection_name): """ Sets the connection_name of this LookmlModelExplore. Connection name :param connection_name: The connection_name of this LookmlModelExplore. :type: str """ self._connection_name = connection_name @property def null_sort_treatment(self): """ Gets the null_sort_treatment of this LookmlModelExplore. How nulls are sorted, possible values are \"low\", \"high\", \"first\" and \"last\" :return: The null_sort_treatment of this LookmlModelExplore. :rtype: str """ return self._null_sort_treatment @null_sort_treatment.setter def null_sort_treatment(self, null_sort_treatment): """ Sets the null_sort_treatment of this LookmlModelExplore. How nulls are sorted, possible values are \"low\", \"high\", \"first\" and \"last\" :param null_sort_treatment: The null_sort_treatment of this LookmlModelExplore. :type: str """ self._null_sort_treatment = null_sort_treatment @property def files(self): """ Gets the files of this LookmlModelExplore. List of model source files :return: The files of this LookmlModelExplore. :rtype: list[str] """ return self._files @files.setter def files(self, files): """ Sets the files of this LookmlModelExplore. List of model source files :param files: The files of this LookmlModelExplore. :type: list[str] """ self._files = files @property def source_file(self): """ Gets the source_file of this LookmlModelExplore. Primary source_file file :return: The source_file of this LookmlModelExplore. :rtype: str """ return self._source_file @source_file.setter def source_file(self, source_file): """ Sets the source_file of this LookmlModelExplore. Primary source_file file :param source_file: The source_file of this LookmlModelExplore. :type: str """ self._source_file = source_file @property def project_name(self): """ Gets the project_name of this LookmlModelExplore. Name of project :return: The project_name of this LookmlModelExplore. :rtype: str """ return self._project_name @project_name.setter def project_name(self, project_name): """ Sets the project_name of this LookmlModelExplore. Name of project :param project_name: The project_name of this LookmlModelExplore. :type: str """ self._project_name = project_name @property def model_name(self): """ Gets the model_name of this LookmlModelExplore. Name of model :return: The model_name of this LookmlModelExplore. :rtype: str """ return self._model_name @model_name.setter def model_name(self, model_name): """ Sets the model_name of this LookmlModelExplore. 
Name of model :param model_name: The model_name of this LookmlModelExplore. :type: str """ self._model_name = model_name @property def view_name(self): """ Gets the view_name of this LookmlModelExplore. Name of view :return: The view_name of this LookmlModelExplore. :rtype: str """ return self._view_name @view_name.setter def view_name(self, view_name): """ Sets the view_name of this LookmlModelExplore. Name of view :param view_name: The view_name of this LookmlModelExplore. :type: str """ self._view_name = view_name @property def hidden(self): """ Gets the hidden of this LookmlModelExplore. Is hidden :return: The hidden of this LookmlModelExplore. :rtype: bool """ return self._hidden @hidden.setter def hidden(self, hidden): """ Sets the hidden of this LookmlModelExplore. Is hidden :param hidden: The hidden of this LookmlModelExplore. :type: bool """ self._hidden = hidden @property def sql_table_name(self): """ Gets the sql_table_name of this LookmlModelExplore. A sql_table_name expression that defines what sql table the view/explore maps onto. Example: \"prod_orders2 AS orders\" in a view named orders. :return: The sql_table_name of this LookmlModelExplore. :rtype: str """ return self._sql_table_name @sql_table_name.setter def sql_table_name(self, sql_table_name): """ Sets the sql_table_name of this LookmlModelExplore. A sql_table_name expression that defines what sql table the view/explore maps onto. Example: \"prod_orders2 AS orders\" in a view named orders. :param sql_table_name: The sql_table_name of this LookmlModelExplore. :type: str """ self._sql_table_name = sql_table_name @property def access_filter_fields(self): """ Gets the access_filter_fields of this LookmlModelExplore. (DEPRECATED) Array of access filter field names :return: The access_filter_fields of this LookmlModelExplore. :rtype: list[str] """ return self._access_filter_fields @access_filter_fields.setter def access_filter_fields(self, access_filter_fields): """ Sets the access_filter_fields of this LookmlModelExplore. (DEPRECATED) Array of access filter field names :param access_filter_fields: The access_filter_fields of this LookmlModelExplore. :type: list[str] """ self._access_filter_fields = access_filter_fields @property def access_filters(self): """ Gets the access_filters of this LookmlModelExplore. Access filters :return: The access_filters of this LookmlModelExplore. :rtype: list[LookmlModelExploreAccessFilter] """ return self._access_filters @access_filters.setter def access_filters(self, access_filters): """ Sets the access_filters of this LookmlModelExplore. Access filters :param access_filters: The access_filters of this LookmlModelExplore. :type: list[LookmlModelExploreAccessFilter] """ self._access_filters = access_filters @property def aliases(self): """ Gets the aliases of this LookmlModelExplore. Aliases :return: The aliases of this LookmlModelExplore. :rtype: list[LookmlModelExploreAlias] """ return self._aliases @aliases.setter def aliases(self, aliases): """ Sets the aliases of this LookmlModelExplore. Aliases :param aliases: The aliases of this LookmlModelExplore. :type: list[LookmlModelExploreAlias] """ self._aliases = aliases @property def always_filter(self): """ Gets the always_filter of this LookmlModelExplore. Always filter :return: The always_filter of this LookmlModelExplore. :rtype: list[LookmlModelExploreAlwaysFilter] """ return self._always_filter @always_filter.setter def always_filter(self, always_filter): """ Sets the always_filter of this LookmlModelExplore. 
Always filter :param always_filter: The always_filter of this LookmlModelExplore. :type: list[LookmlModelExploreAlwaysFilter] """ self._always_filter = always_filter @property def conditionally_filter(self): """ Gets the conditionally_filter of this LookmlModelExplore. Conditionally filter :return: The conditionally_filter of this LookmlModelExplore. :rtype: list[LookmlModelExploreConditionallyFilter] """ return self._conditionally_filter @conditionally_filter.setter def conditionally_filter(self, conditionally_filter): """ Sets the conditionally_filter of this LookmlModelExplore. Conditionally filter :param conditionally_filter: The conditionally_filter of this LookmlModelExplore. :type: list[LookmlModelExploreConditionallyFilter] """ self._conditionally_filter = conditionally_filter @property def index_fields(self): """ Gets the index_fields of this LookmlModelExplore. Array of index fields :return: The index_fields of this LookmlModelExplore. :rtype: list[str] """ return self._index_fields @index_fields.setter def index_fields(self, index_fields): """ Sets the index_fields of this LookmlModelExplore. Array of index fields :param index_fields: The index_fields of this LookmlModelExplore. :type: list[str] """ self._index_fields = index_fields @property def sets(self): """ Gets the sets of this LookmlModelExplore. Sets :return: The sets of this LookmlModelExplore. :rtype: list[LookmlModelExploreSet] """ return self._sets @sets.setter def sets(self, sets): """ Sets the sets of this LookmlModelExplore. Sets :param sets: The sets of this LookmlModelExplore. :type: list[LookmlModelExploreSet] """ self._sets = sets @property def errors(self): """ Gets the errors of this LookmlModelExplore. Errors :return: The errors of this LookmlModelExplore. :rtype: list[LookmlModelExploreError] """ return self._errors @errors.setter def errors(self, errors): """ Sets the errors of this LookmlModelExplore. Errors :param errors: The errors of this LookmlModelExplore. :type: list[LookmlModelExploreError] """ self._errors = errors @property def fields(self): """ Gets the fields of this LookmlModelExplore. Fields :return: The fields of this LookmlModelExplore. :rtype: LookmlModelExploreFieldset """ return self._fields @fields.setter def fields(self, fields): """ Sets the fields of this LookmlModelExplore. Fields :param fields: The fields of this LookmlModelExplore. :type: LookmlModelExploreFieldset """ self._fields = fields @property def joins(self): """ Gets the joins of this LookmlModelExplore. Views joined into this explore :return: The joins of this LookmlModelExplore. :rtype: list[LookmlModelExploreJoins] """ return self._joins @joins.setter def joins(self, joins): """ Sets the joins of this LookmlModelExplore. Views joined into this explore :param joins: The joins of this LookmlModelExplore. :type: list[LookmlModelExploreJoins] """ self._joins = joins @property def group_label(self): """ Gets the group_label of this LookmlModelExplore. Label used to group explores in the navigation menus :return: The group_label of this LookmlModelExplore. :rtype: str """ return self._group_label @group_label.setter def group_label(self, group_label): """ Sets the group_label of this LookmlModelExplore. Label used to group explores in the navigation menus :param group_label: The group_label of this LookmlModelExplore. :type: str """ self._group_label = group_label @property def supported_measure_types(self): """ Gets the supported_measure_types of this LookmlModelExplore. 
An array of items describing which custom measure types are supported for creating a custom measure 'based_on' each possible dimension type. :return: The supported_measure_types of this LookmlModelExplore. :rtype: list[LookmlModelExploreSupportedMeasureType] """ return self._supported_measure_types @supported_measure_types.setter def supported_measure_types(self, supported_measure_types): """ Sets the supported_measure_types of this LookmlModelExplore. An array of items describing which custom measure types are supported for creating a custom measure 'based_on' each possible dimension type. :param supported_measure_types: The supported_measure_types of this LookmlModelExplore. :type: list[LookmlModelExploreSupportedMeasureType] """ self._supported_measure_types = supported_measure_types def to_dict(self): """ Returns the model properties as a dict """ result = {} for attr, _ in iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """ Returns the string representation of the model """ return pformat(self.to_dict()) def __repr__(self): """ For `print` and `pprint` """ return self.to_str() def __eq__(self, other): """ Returns true if both objects are equal """ if not isinstance(other, LookmlModelExplore): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """ Returns true if both objects are not equal """ return not self == other
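# --- Hedged usage sketch (editor's addition, not part of the generated SDK
# file above). Swagger-codegen model classes conventionally accept one
# optional keyword argument per attribute in __init__; that signature is
# elided above, so the constructor call below is an assumption inferred from
# the `self._id = id`-style assignments shown.
if __name__ == '__main__':
    explore = LookmlModelExplore(id='thelook::orders', name='orders')
    explore.label = 'Orders'          # property setters store values verbatim
    as_dict = explore.to_dict()       # recursively dict-ifies nested models
    print(as_dict['id'])              # -> 'thelook::orders'
    print(explore == explore)         # __eq__ compares __dict__ -> True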
mit
brendandburns/tensorflow
tensorflow/python/training/queue_runner_test.py
5
6725
"""Tests for QueueRunner.""" import time import tensorflow.python.platform import tensorflow as tf class QueueRunnerTest(tf.test.TestCase): def testBasic(self): with self.test_session() as sess: # CountUpTo will raise OUT_OF_RANGE when it reaches the count. zero64 = tf.constant(0, dtype=tf.int64) var = tf.Variable(zero64) count_up_to = var.count_up_to(3) queue = tf.FIFOQueue(10, tf.float32) tf.initialize_all_variables().run() qr = tf.train.QueueRunner(queue, [count_up_to]) threads = qr.create_threads(sess) for t in threads: t.start() for t in threads: t.join() self.assertEqual(0, len(qr.exceptions_raised)) # The variable should be 3. self.assertEqual(3, var.eval()) def testTwoOps(self): with self.test_session() as sess: # CountUpTo will raise OUT_OF_RANGE when it reaches the count. zero64 = tf.constant(0, dtype=tf.int64) var0 = tf.Variable(zero64) count_up_to_3 = var0.count_up_to(3) var1 = tf.Variable(zero64) count_up_to_30 = var1.count_up_to(30) queue = tf.FIFOQueue(10, tf.float32) qr = tf.train.QueueRunner(queue, [count_up_to_3, count_up_to_30]) threads = qr.create_threads(sess) tf.initialize_all_variables().run() for t in threads: t.start() for t in threads: t.join() self.assertEqual(0, len(qr.exceptions_raised)) self.assertEqual(3, var0.eval()) self.assertEqual(30, var1.eval()) def testExceptionsCaptured(self): with self.test_session() as sess: queue = tf.FIFOQueue(10, tf.float32) qr = tf.train.QueueRunner(queue, ["i fail", "so fail"]) threads = qr.create_threads(sess) tf.initialize_all_variables().run() for t in threads: t.start() for t in threads: t.join() exceptions = qr.exceptions_raised self.assertEqual(2, len(exceptions)) self.assertTrue("Operation not in the graph" in str(exceptions[0])) self.assertTrue("Operation not in the graph" in str(exceptions[1])) def testRealDequeueEnqueue(self): with self.test_session() as sess: q0 = tf.FIFOQueue(3, tf.float32) enqueue0 = q0.enqueue((10.0,)) close0 = q0.close() q1 = tf.FIFOQueue(30, tf.float32) enqueue1 = q1.enqueue((q0.dequeue(),)) dequeue1 = q1.dequeue() qr = tf.train.QueueRunner(q1, [enqueue1]) threads = qr.create_threads(sess) for t in threads: t.start() # Enqueue 2 values, then close queue0. enqueue0.run() enqueue0.run() close0.run() # Wait for the queue runner to terminate. for t in threads: t.join() # It should have terminated cleanly. self.assertEqual(0, len(qr.exceptions_raised)) # The 2 values should be in queue1. self.assertEqual(10.0, dequeue1.eval()) self.assertEqual(10.0, dequeue1.eval()) # And queue1 should now be closed. with self.assertRaisesRegexp(tf.errors.OutOfRangeError, "is closed"): dequeue1.eval() def testRespectCoordShouldStop(self): with self.test_session() as sess: # CountUpTo will raise OUT_OF_RANGE when it reaches the count. zero64 = tf.constant(0, dtype=tf.int64) var = tf.Variable(zero64) count_up_to = var.count_up_to(3) queue = tf.FIFOQueue(10, tf.float32) tf.initialize_all_variables().run() qr = tf.train.QueueRunner(queue, [count_up_to]) # As the coordinator to stop. The queue runner should # finish immediately. coord = tf.train.Coordinator() coord.request_stop() threads = qr.create_threads(sess, coord) for t in threads: t.start() coord.join(threads) self.assertEqual(0, len(qr.exceptions_raised)) # The variable should be 0. 
self.assertEqual(0, var.eval()) def testRequestStopOnException(self): with self.test_session() as sess: queue = tf.FIFOQueue(10, tf.float32) qr = tf.train.QueueRunner(queue, ["not an op"]) coord = tf.train.Coordinator() threads = qr.create_threads(sess, coord) for t in threads: t.start() # The exception should be re-raised when joining. with self.assertRaisesRegexp(ValueError, "Operation not in the graph"): coord.join(threads) def testGracePeriod(self): with self.test_session() as sess: # The enqueue will quickly block. queue = tf.FIFOQueue(2, tf.float32) enqueue = queue.enqueue((10.0,)) dequeue = queue.dequeue() qr = tf.train.QueueRunner(queue, [enqueue]) coord = tf.train.Coordinator() threads = qr.create_threads(sess, coord, start=True) # Dequeue one element and then request stop. dequeue.op.run() time.sleep(0.02) coord.request_stop() # We should be able to join because the RequestStop() will cause # the queue to be closed and the enqueue to terminate. coord.join(threads, stop_grace_period_secs=0.05) def testNoMultiThreads(self): with self.test_session() as sess: # CountUpTo will raise OUT_OF_RANGE when it reaches the count. zero64 = tf.constant(0, dtype=tf.int64) var = tf.Variable(zero64) count_up_to = var.count_up_to(3) queue = tf.FIFOQueue(10, tf.float32) tf.initialize_all_variables().run() coord = tf.train.Coordinator() qr = tf.train.QueueRunner(queue, [count_up_to]) threads = [] threads.extend(qr.create_threads(sess, coord=coord)) with self.assertRaisesRegexp( RuntimeError, "Threads are already running"): threads.extend(qr.create_threads(sess, coord=coord)) coord.request_stop() coord.join(threads, stop_grace_period_secs=0.5) def testThreads(self): with self.test_session() as sess: # CountUpTo will raise OUT_OF_RANGE when it reaches the count. zero64 = tf.constant(0, dtype=tf.int64) var = tf.Variable(zero64) count_up_to = var.count_up_to(3) queue = tf.FIFOQueue(10, tf.float32) tf.initialize_all_variables().run() qr = tf.train.QueueRunner(queue, [count_up_to, "bad op"]) threads = qr.create_threads(sess, start=True) for t in threads: t.join() exceptions = qr.exceptions_raised self.assertEqual(1, len(exceptions)) self.assertTrue("Operation not in the graph" in str(exceptions[0])) threads = qr.create_threads(sess, start=True) for t in threads: t.join() exceptions = qr.exceptions_raised self.assertEqual(1, len(exceptions)) self.assertTrue("Operation not in the graph" in str(exceptions[0])) if __name__ == "__main__": tf.test.main()
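# --- Hedged companion sketch (editor's addition, not part of the test file
# above): the minimal production pattern these tests exercise, using the same
# historical TensorFlow API (tf.train.QueueRunner / tf.train.Coordinator)
# that the tests import.
def _queue_runner_usage_sketch():
    with tf.Session() as sess:
        queue = tf.FIFOQueue(10, tf.float32)
        enqueue_op = queue.enqueue((10.0,))
        qr = tf.train.QueueRunner(queue, [enqueue_op])
        coord = tf.train.Coordinator()
        # One thread per enqueue op; each thread polls coord.should_stop().
        threads = qr.create_threads(sess, coord=coord, start=True)
        value = sess.run(queue.dequeue())   # -> 10.0
        coord.request_stop()                # ask the runner threads to exit
        coord.join(threads, stop_grace_period_secs=0.5)
        return value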
apache-2.0
sebastien-j/gensim
gensim/corpora/sharded_corpus.py
63
35097
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Original author: Jan Hajic jr. # Copyright (C) 2015 Radim Rehurek and gensim team. # Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html """ This module implements a corpus class that stores its data in separate files called "shards". This is a compromise between speed (keeping the whole dataset in memory) and memory footprint (keeping the data on disk and reading from it on demand). The corpus is intended for situations where you need to use your data as numpy arrays for some iterative processing (like training something using SGD, which usually involves heavy matrix multiplication). """ from __future__ import print_function import logging import os import math import numpy import scipy.sparse as sparse import time logger = logging.getLogger(__name__) #: Specifies which dtype should be used for serializing the shards. _default_dtype = float try: import theano _default_dtype = theano.config.floatX except ImportError: logger.info('Could not import Theano, will use standard float for default ShardedCorpus dtype.') from six.moves import xrange import gensim from gensim.corpora import IndexedCorpus from gensim.interfaces import TransformedCorpus class ShardedCorpus(IndexedCorpus): """ This corpus is designed for situations where you need to train a model on matrices, with a large number of iterations. (It should be faster than gensim's other IndexedCorpus implementations for this use case; check the `benchmark_datasets.py` script. It should also serialize faster.) The corpus stores its data in separate files called "shards". This is a compromise between speed (keeping the whole dataset in memory) and memory footprint (keeping the data on disk and reading from it on demand). Persistence is done using the standard gensim load/save methods. .. note:: The dataset is **read-only**, there is - as opposed to gensim's Similarity class, which works similarly - no way of adding documents to the dataset (for now). You can use ShardedCorpus to serialize your data just like any other gensim corpus that implements serialization. However, because the data is saved as numpy 2-dimensional ndarrays (or scipy sparse matrices), you need to supply the dimension of your data to the corpus. (The dimension of word frequency vectors will typically be the size of the vocabulary, etc.) >>> corpus = gensim.utils.mock_data() >>> output_prefix = 'mydata.shdat' >>> ShardedCorpus.serialize(output_prefix, corpus, dim=1000) The `output_prefix` tells the ShardedCorpus where to put the data. Shards are saved as `output_prefix.0`, `output_prefix.1`, etc. All shards must be of the same size. The shards can be re-sized (which is essentially a re-serialization into new-size shards), but note that this operation will temporarily take twice as much disk space, because the old shards are not deleted until the new shards are safely in place. After serializing the data, the corpus will then save itself to the file `output_prefix`. On further initialization with the same `output_prefix`, the corpus will load the already built dataset unless the `overwrite` option is given. (A new object is "cloned" from the one saved to `output_prefix` previously.) To retrieve data, you can load the corpus and use it like a list: >>> sh_corpus = ShardedCorpus.load(output_prefix) >>> batch = sh_corpus[100:150] This will retrieve a numpy 2-dimensional array of 50 rows and 1000 columns (1000 was the dimension of the data we supplied to the corpus). 
To retrieve gensim-style sparse vectors, set the `gensim` property: >>> sh_corpus.gensim = True >>> batch = sh_corpus[100:150] The batch will now be a generator of gensim vectors. Since the corpus needs the data serialized in order to be able to operate, it will serialize data right away on initialization. Instead of calling `ShardedCorpus.serialize()`, you can just initialize and use the corpus right away: >>> corpus = ShardedCorpus(output_prefix, corpus, dim=1000) >>> batch = corpus[100:150] ShardedCorpus also supports working with scipy sparse matrices, both during retrieval and during serialization. If you want to serialize your data as sparse matrices, set the `sparse_serialization` flag. For retrieving your data as sparse matrices, use the `sparse_retrieval` flag. (You can also retrieve densely serialized data as sparse matrices, for the sake of completeness, and vice versa.) By default, the corpus will retrieve numpy ndarrays even if it was serialized into sparse matrices. >>> sparse_prefix = 'mydata.sparse.shdat' >>> ShardedCorpus.serialize(sparse_prefix, corpus, dim=1000, sparse_serialization=True) >>> sparse_corpus = ShardedCorpus.load(sparse_prefix) >>> batch = sparse_corpus[100:150] >>> type(batch) <type 'numpy.ndarray'> >>> sparse_corpus.sparse_retrieval = True >>> batch = sparse_corpus[100:150] >>> type(batch) <class 'scipy.sparse.csr.csr_matrix'> While you *can* touch the `sparse_retrieval` attribute during the life of a ShardedCorpus object, you should definitely not touch `sparse_serialization`! Changing the attribute will not miraculously re-serialize the data in the requested format. The CSR format is used for sparse data throughout. Internally, to retrieve data, the dataset keeps track of which shard is currently open and on a `__getitem__` request, either returns an item from the current shard, or opens a new one. The shard size is constant, except for the last shard. """ def __init__(self, output_prefix, corpus, dim=None, shardsize=4096, overwrite=False, sparse_serialization=False, sparse_retrieval=False, gensim=False): """Initializes the dataset. If `output_prefix` is not found, builds the shards. :type output_prefix: str :param output_prefix: The absolute path to the file from which shard filenames should be derived. The individual shards will be saved as `output_prefix.0`, `output_prefix.1`, etc. The `output_prefix` path then works as the filename to which the ShardedCorpus object itself will be automatically saved. Normally, gensim corpora do not do this, but ShardedCorpus needs to remember several serialization settings: namely the shard size and whether it was serialized in dense or sparse format. By saving automatically, any new ShardedCorpus with the same `output_prefix` will be able to find the information about the data serialized with the given prefix. If you want to *overwrite* your data serialized with some output prefix, set the `overwrite` flag to True. Of course, you can save your corpus separately as well using the `save()` method. :type corpus: gensim.interfaces.CorpusABC :param corpus: The source corpus from which to build the dataset. :type dim: int :param dim: Specify beforehand what the dimension of a dataset item should be. This is useful when initializing from a corpus that doesn't advertise its dimension, or when it does and you want to check that the corpus matches the expected dimension.
**If `dim` is left unused and `corpus` does not provide its dimension in an expected manner, initialization will fail.** :type shardsize: int :param shardsize: How many data points should be in one shard. More data per shard means less shard reloading but higher memory usage and vice versa. :type overwrite: bool :param overwrite: If set, will build dataset from given corpus even if `output_prefix` already exists. :type sparse_serialization: bool :param sparse_serialization: If set, will save the data in a sparse form (as csr matrices). This is to speed up retrieval when you know you will be using sparse matrices. ..note:: This property **should not change** during the lifetime of the dataset. (If you find out you need to change from a sparse to a dense representation, the best practice is to create another ShardedCorpus object.) :type sparse_retrieval: bool :param sparse_retrieval: If set, will retrieve data as sparse vectors (numpy csr matrices). If unset, will return ndarrays. Note that retrieval speed for this option depends on how the dataset was serialized. If `sparse_serialization` was set, then setting `sparse_retrieval` will be faster. However, if the two settings do not correspond, the conversion on the fly will slow the dataset down. :type gensim: bool :param gensim: If set, will convert the output to gensim sparse vectors (list of tuples (id, value)) to make it behave like any other gensim corpus. This **will** slow the dataset down. """ self.output_prefix = output_prefix self.shardsize = shardsize self.n_docs = 0 self.offsets = [] self.n_shards = 0 self.dim = dim # This number may change during initialization/loading. # Sparse vs. dense serialization and retrieval. self.sparse_serialization = sparse_serialization self.sparse_retrieval = sparse_retrieval self.gensim = gensim # The "state" of the dataset. self.current_shard = None # The current shard itself (numpy ndarray) self.current_shard_n = None # Current shard is the current_shard_n-th self.current_offset = None # The index into the dataset which # corresponds to index 0 of current shard logger.info('Initializing sharded corpus with prefix ' '{0}'.format(output_prefix)) if (not os.path.isfile(output_prefix)) or overwrite: logger.info('Building from corpus...') self.init_shards(output_prefix, corpus, shardsize) # Save automatically, to facilitate re-loading # and retain information about how the corpus # was serialized. logger.info('Saving ShardedCorpus object to ' '{0}'.format(self.output_prefix)) self.save() else: logger.info('Cloning existing...') self.init_by_clone() def init_shards(self, output_prefix, corpus, shardsize=4096, dtype=_default_dtype): """Initialize shards from the corpus.""" if not gensim.utils.is_corpus(corpus): raise ValueError('Cannot initialize shards without a corpus to read' ' from! (Got corpus type: {0})'.format(type(corpus))) proposed_dim = self._guess_n_features(corpus) if proposed_dim != self.dim: if self.dim is None: logger.info('Deriving dataset dimension from corpus: ' '{0}'.format(proposed_dim)) else: logger.warn('Dataset dimension derived from input corpus diffe' 'rs from initialization argument, using corpus.' '(corpus {0}, init arg {1})'.format(proposed_dim, self.dim)) self.dim = proposed_dim self.offsets = [0] start_time = time.clock() logger.info('Running init from corpus.') for n, doc_chunk in enumerate(gensim.utils.grouper(corpus, chunksize=shardsize)): logger.info('Chunk no. 
{0} at {1} s'.format(n, time.clock() - start_time)) current_shard = numpy.zeros((len(doc_chunk), self.dim), dtype=dtype) logger.debug('Current chunk dimension: ' '{0} x {1}'.format(len(doc_chunk), self.dim)) for i, doc in enumerate(doc_chunk): doc = dict(doc) current_shard[i][list(doc)] = list(gensim.matutils.itervalues(doc)) # Handles the updating as well. if self.sparse_serialization: current_shard = sparse.csr_matrix(current_shard) self.save_shard(current_shard) end_time = time.clock() logger.info('Built {0} shards in {1} s.'.format(self.n_shards, end_time - start_time)) def init_by_clone(self): """ Initialize by copying over attributes of another ShardedCorpus instance saved to the output_prefix given at __init__(). """ temp = self.__class__.load(self.output_prefix) self.n_shards = temp.n_shards self.n_docs = temp.n_docs self.offsets = temp.offsets if temp.dim != self.dim: if self.dim is None: logger.info('Loaded dataset dimension: {0}'.format(temp.dim)) else: logger.warn('Loaded dataset dimension differs from init arg ' 'dimension, using loaded dim. ' '(loaded {0}, init {1})'.format(temp.dim, self.dim)) self.dim = temp.dim # To be consistent with the loaded data! def save_shard(self, shard, n=None, filename=None): """ Pickle the given shard. If `n` is not given, will consider the shard a new one. If `filename` is given, will use that file name instead of generating one. """ new_shard = False if n is None: n = self.n_shards # Saving the *next* one by default. new_shard = True if not filename: filename = self._shard_name(n) gensim.utils.pickle(shard, filename) if new_shard: self.offsets.append(self.offsets[-1] + shard.shape[0]) self.n_docs += shard.shape[0] self.n_shards += 1 def load_shard(self, n): """ Load (unpickle) the n-th shard as the "live" part of the dataset into the Dataset object.""" #logger.debug('ShardedCorpus loading shard {0}, ' # 'current shard: {1}'.format(n, self.current_shard_n)) # No-op if the shard is already open. if self.current_shard_n == n: return filename = self._shard_name(n) if not os.path.isfile(filename): raise ValueError('Attempting to load nonexistent shard no. {0}'.format(n)) shard = gensim.utils.unpickle(filename) self.current_shard = shard self.current_shard_n = n self.current_offset = self.offsets[n] def reset(self): """ Reset to no shard at all. Used for saving. """ self.current_shard = None self.current_shard_n = None self.current_offset = None def shard_by_offset(self, offset): """ Determine which shard the given offset belongs to. If the offset is greater than the number of available documents, raises a `ValueError`. Assumes that all shards have the same size. """ k = int(offset / self.shardsize) if offset >= self.n_docs: raise ValueError('Too high offset specified ({0}), available ' 'docs: {1}'.format(offset, self.n_docs)) if offset < 0: raise ValueError('Negative offset {0} currently not' ' supported.'.format(offset)) return k k = -1 for i, o in enumerate(self.offsets): if o > offset: # Condition should fire for every valid offset, # since the last offset is n_docs (one-past-end). k = i - 1 # First offset is always 0, so i is at least 1. break return k def in_current(self, offset): """ Determine whether the given offset falls within the current shard. """ return (self.current_offset <= offset) \ and (offset < self.offsets[self.current_shard_n + 1]) def in_next(self, offset): """ Determine whether the given offset falls within the next shard. This is a very small speedup: typically, we will be iterating through the data forward. 
Could save considerable time with a very large number of smaller shards. """ if self.current_shard_n == self.n_shards: return False # There's no next shard. return (self.offsets[self.current_shard_n + 1] <= offset) \ and (offset < self.offsets[self.current_shard_n + 2]) def resize_shards(self, shardsize): """ Re-process the dataset to new shard size. This may take pretty long. Also, note that you need some space on disk for this one (we're assuming there is enough disk space for double the size of the dataset and that there is enough memory for old + new shardsize). :type shardsize: int :param shardsize: The new shard size. """ # Determine how many new shards there will be n_new_shards = int(math.floor(self.n_docs / float(shardsize))) if self.n_docs % shardsize != 0: n_new_shards += 1 new_shard_names = [] new_offsets = [0] for new_shard_idx in xrange(n_new_shards): new_start = shardsize * new_shard_idx new_stop = new_start + shardsize # Last shard? if new_stop > self.n_docs: # Sanity check assert new_shard_idx == n_new_shards - 1, \ 'Shard no. {0} that ends at {1} over last document' \ ' ({2}) is not the last projected shard ({3})???' \ ''.format(new_shard_idx, new_stop, self.n_docs, n_new_shards) new_stop = self.n_docs new_shard = self[new_start:new_stop] new_shard_name = self._resized_shard_name(new_shard_idx) new_shard_names.append(new_shard_name) try: self.save_shard(new_shard, new_shard_idx, new_shard_name) except Exception: # Clean up on unsuccessful resize. for new_shard_name in new_shard_names: os.remove(new_shard_name) raise new_offsets.append(new_stop) # Move old shard files out, new ones in. Complicated due to possibility # of exceptions. old_shard_names = [self._shard_name(n) for n in xrange(self.n_shards)] try: for old_shard_n, old_shard_name in enumerate(old_shard_names): os.remove(old_shard_name) except Exception as e: logger.error('Exception occurred during old shard no. {0} ' 'removal: {1}.\nAttempting to at least move ' 'new shards in.'.format(old_shard_n, str(e))) finally: # If something happens with cleaning up - try to at least get the # new guys in. try: for shard_n, new_shard_name in enumerate(new_shard_names): os.rename(new_shard_name, self._shard_name(shard_n)) # If something happens when we're in this stage, we're screwed. except Exception as e: print(e) raise RuntimeError('Resizing completely failed for some reason.' ' Sorry, dataset is probably ruined...') finally: # Sets the new shard stats. self.n_shards = n_new_shards self.offsets = new_offsets self.shardsize = shardsize self.reset() def _shard_name(self, n): """Generate the name for the n-th shard.""" return self.output_prefix + '.' + str(n) def _resized_shard_name(self, n): """ Generate the name for the n-th new shard temporary file when resizing dataset. The file will then be re-named to standard shard name. """ return self.output_prefix + '.resize-temp.' + str(n) def _guess_n_features(self, corpus): """Attempt to guess number of features in `corpus`.""" n_features = None if hasattr(corpus, 'dim'): # print 'Guessing from \'dim\' attribute.' n_features = corpus.dim elif hasattr(corpus, 'dictionary'): # print 'GUessing from dictionary.' n_features = len(corpus.dictionary) elif hasattr(corpus, 'n_out'): # print 'Guessing from \'n_out\' attribute.' n_features = corpus.n_out elif hasattr(corpus, 'num_terms'): # print 'Guessing from \'num_terms\' attribute.' 
n_features = corpus.num_terms elif isinstance(corpus, TransformedCorpus): # TransformedCorpus: first check if the transformer object # defines some output dimension; if it doesn't, relegate guessing # to the corpus that is being transformed. This may easily fail! try: return self._guess_n_features(corpus.obj) except TypeError: return self._guess_n_features(corpus.corpus) else: if not self.dim: raise TypeError('Couldn\'t find number of features, ' 'refusing to guess (dimension set to {0},' 'type of corpus: {1}).'.format(self.dim, type(corpus))) else: logger.warn('Couldn\'t find number of features, trusting ' 'supplied dimension ({0})'.format(self.dim)) n_features = self.dim if self.dim and n_features != self.dim: logger.warn('Discovered inconsistent dataset dim ({0}) and ' 'feature count from corpus ({1}). Coercing to dimension' ' given by argument.'.format(self.dim, n_features)) return n_features def __len__(self): return self.n_docs def _ensure_shard(self, offset): # No shard loaded if self.current_shard is None: shard_n = self.shard_by_offset(offset) self.load_shard(shard_n) # Find appropriate shard, if necessary elif not self.in_current(offset): if self.in_next(offset): self.load_shard(self.current_shard_n + 1) else: shard_n = self.shard_by_offset(offset) self.load_shard(shard_n) def get_by_offset(self, offset): """As opposed to getitem, this one only accepts ints as offsets.""" self._ensure_shard(offset) result = self.current_shard[offset - self.current_offset] return result def __getitem__(self, offset): """ Retrieve the given row of the dataset. Supports slice notation. """ if isinstance(offset, list): # Handle all serialization & retrieval options. if self.sparse_serialization: l_result = sparse.vstack([self.get_by_offset(i) for i in offset]) if self.gensim: l_result = self._getitem_sparse2gensim(l_result) elif not self.sparse_retrieval: l_result = numpy.array(l_result.todense()) else: l_result = numpy.array([self.get_by_offset(i) for i in offset]) if self.gensim: l_result = self._getitem_dense2gensim(l_result) elif self.sparse_retrieval: l_result = sparse.csr_matrix(l_result) return l_result elif isinstance(offset, slice): start = offset.start stop = offset.stop if stop > self.n_docs: raise IndexError('Requested slice offset {0} out of range' ' ({1} docs)'.format(stop, self.n_docs)) # - get range of shards over which to iterate first_shard = self.shard_by_offset(start) last_shard = self.n_shards - 1 if not stop == self.n_docs: last_shard = self.shard_by_offset(stop) # This fails on one-past # slice indexing; that's why there's a code branch here. #logger.debug('ShardedCorpus: Retrieving slice {0}: ' # 'shard {1}'.format((offset.start, offset.stop), # (first_shard, last_shard))) self.load_shard(first_shard) # The easy case: both in one shard. if first_shard == last_shard: s_result = self.current_shard[start - self.current_offset: stop - self.current_offset] # Handle different sparsity settings: s_result = self._getitem_format(s_result) return s_result # The hard case: the slice is distributed across multiple shards # - initialize numpy.zeros() s_result = numpy.zeros((stop - start, self.dim), dtype=self.current_shard.dtype) if self.sparse_serialization: s_result = sparse.csr_matrix((0, self.dim), dtype=self.current_shard.dtype) # - gradually build it up. We will be using three set of start:stop # indexes: # - into the dataset (these are the indexes the caller works with) # - into the current shard # - into the result # Indexes into current result rows. 
These are always smaller than # the dataset indexes by `start` (as we move over the shards, # we're moving by the same number of rows through the result). result_start = 0 result_stop = self.offsets[self.current_shard_n + 1] - start # Indexes into current shard. These are trickiest: # - if in starting shard, these are from (start - current_offset) # to self.shardsize # - if in intermediate shard, these are from 0 to self.shardsize # - if in ending shard, these are from 0 # to (stop - current_offset) shard_start = start - self.current_offset shard_stop = self.offsets[self.current_shard_n + 1] - \ self.current_offset #s_result[result_start:result_stop] = self.current_shard[ # shard_start:shard_stop] s_result = self.__add_to_slice(s_result, result_start, result_stop, shard_start, shard_stop) # First and last get special treatment, these are in between for shard_n in xrange(first_shard+1, last_shard): self.load_shard(shard_n) result_start = result_stop result_stop += self.shardsize shard_start = 0 shard_stop = self.shardsize s_result = self.__add_to_slice(s_result, result_start, result_stop, shard_start, shard_stop) # Last shard self.load_shard(last_shard) result_start = result_stop result_stop += stop - self.current_offset shard_start = 0 shard_stop = stop - self.current_offset s_result = self.__add_to_slice(s_result, result_start, result_stop, shard_start, shard_stop) s_result = self._getitem_format(s_result) return s_result else: s_result = self.get_by_offset(offset) s_result = self._getitem_format(s_result) return s_result def __add_to_slice(self, s_result, result_start, result_stop, start, stop): """ Add the rows of the current shard from `start` to `stop` into rows `result_start` to `result_stop` of `s_result`. Operation is based on the self.sparse_serialization setting. If the shard contents are dense, then s_result is assumed to be an ndarray that already supports row indices `result_start:result_stop`. If the shard contents are sparse, assumes that s_result has `result_start` rows and we should add them up to `result_stop`. Returns the resulting s_result. """ if (result_stop - result_start) != (stop - start): raise ValueError('Result start/stop range different from source ' 'start/stop range ({0} - {1} vs. {2} - {3}).'.format(result_start, result_stop, start, stop)) # Dense data: just copy using numpy's slice notation if not self.sparse_serialization: s_result[result_start:result_stop] = self.current_shard[start:stop] return s_result # A bit more difficult, we're using a different structure to build the # result. else: if s_result.shape != (result_start, self.dim): raise ValueError('Assumption about sparse s_result shape ' 'invalid: {0} expected rows, {1} real ' 'rows.'.format(result_start, s_result.shape[0])) tmp_matrix = self.current_shard[start:stop] s_result = sparse.vstack([s_result, tmp_matrix]) return s_result def _getitem_format(self, s_result): if self.sparse_serialization: if self.gensim: s_result = self._getitem_sparse2gensim(s_result) elif not self.sparse_retrieval: s_result = numpy.array(s_result.todense()) else: if self.gensim: s_result = self._getitem_dense2gensim(s_result) elif self.sparse_retrieval: s_result = sparse.csr_matrix(s_result) return s_result def _getitem_sparse2gensim(self, result): """ Change given sparse result matrix to gensim sparse vectors. Uses the internals of the sparse matrix to make this fast.
""" def row_sparse2gensim(row_idx, csr_matrix): indices = csr_matrix.indices[csr_matrix.indptr[row_idx]:csr_matrix.indptr[row_idx+1]] g_row = [(col_idx, csr_matrix[row_idx, col_idx]) for col_idx in indices] return g_row output = (row_sparse2gensim(i, result) for i in xrange(result.shape[0])) return output def _getitem_dense2gensim(self, result): """Change given dense result matrix to gensim sparse vectors.""" if len(result.shape) == 1: output = gensim.matutils.full2sparse(result) else: output = (gensim.matutils.full2sparse(result[i]) for i in xrange(result.shape[0])) return output # Overriding the IndexedCorpus and other corpus superclass methods def __iter__(self): """ Yield dataset items one by one (generator). """ for i in xrange(len(self)): yield self[i] def save(self, *args, **kwargs): """ Save itself (the wrapper) in clean state (after calling `reset()`) to the output_prefix file. If you wish to save to a different file, use the `fname` argument as the first positional arg. """ # Can we save to a different file than output_prefix? Well, why not? if len(args) == 0: args = tuple([self.output_prefix]) attrs_to_ignore = ['current_shard', 'current_shard_n', 'current_offset'] if 'ignore' not in kwargs: kwargs['ignore'] = frozenset(attrs_to_ignore) else: kwargs['ignore'] = frozenset([v for v in kwargs['ignore']] + attrs_to_ignore) super(ShardedCorpus, self).save(*args, **kwargs) # # self.reset() # with smart_open(self.output_prefix, 'wb') as pickle_handle: # cPickle.dump(self, pickle_handle) @classmethod def load(cls, fname, mmap=None): """ Load itself in clean state. `mmap` has no effect here. """ return super(ShardedCorpus, cls).load(fname, mmap) @staticmethod def save_corpus(fname, corpus, id2word=None, progress_cnt=1000, metadata=False, **kwargs): """ Implement a serialization interface. Do not call directly; use the `serialize` method instead. Note that you might need some ShardedCorpus init parameters, most likely the dimension (`dim`). Again, pass these as `kwargs` to the `serialize` method. All this thing does is initialize a ShardedCorpus from a corpus with the `output_prefix` argument set to the `fname` parameter of this method. The initialization of a ShardedCorpus takes care of serializing the data (in dense form) to shards. Ignore the parameters id2word, progress_cnt and metadata. They currently do nothing and are here only to provide a compatible method signature with superclass. """ ShardedCorpus(fname, corpus, **kwargs) @classmethod def serialize(serializer, fname, corpus, id2word=None, index_fname=None, progress_cnt=None, labels=None, metadata=False, **kwargs): """ Iterate through the document stream `corpus`, saving the documents as a ShardedCorpus to `fname`. Use this method instead of calling `save_corpus` directly. You may need to supply some kwargs that are used upon dataset creation (namely: `dim`, unless the dataset can infer the dimension from the given corpus). Ignore the parameters id2word, index_fname, progress_cnt, labels and metadata. They currently do nothing and are here only to provide a compatible method signature with superclass.""" serializer.save_corpus(fname, corpus, id2word=id2word, progress_cnt=progress_cnt, metadata=metadata, **kwargs)
gpl-3.0
zmike/servo
tests/wpt/css-tests/tools/pywebsocket/src/mod_pywebsocket/msgutil.py
658
7598
# Copyright 2011, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Message related utilities. Note: request.connection.write/read are used in this module, even though mod_python document says that they should be used only in connection handlers. Unfortunately, we have no other options. For example, request.write/read are not suitable because they don't allow direct raw bytes writing/reading. """ import Queue import threading # Export Exception symbols from msgutil for backward compatibility from mod_pywebsocket._stream_base import ConnectionTerminatedException from mod_pywebsocket._stream_base import InvalidFrameException from mod_pywebsocket._stream_base import BadOperationException from mod_pywebsocket._stream_base import UnsupportedFrameException # An API for handler to send/receive WebSocket messages. def close_connection(request): """Close connection. Args: request: mod_python request. """ request.ws_stream.close_connection() def send_message(request, payload_data, end=True, binary=False): """Send a message (or part of a message). Args: request: mod_python request. payload_data: unicode text or str binary to send. end: True to terminate a message. False to send payload_data as part of a message that is to be terminated by next or later send_message call with end=True. binary: send payload_data as binary frame(s). Raises: BadOperationException: when server already terminated. """ request.ws_stream.send_message(payload_data, end, binary) def receive_message(request): """Receive a WebSocket frame and return its payload as a text in unicode or a binary in str. Args: request: mod_python request. Raises: InvalidFrameException: when client send invalid frame. UnsupportedFrameException: when client send unsupported frame e.g. some of reserved bit is set but no extension can recognize it. InvalidUTF8Exception: when client send a text frame containing any invalid UTF-8 string. ConnectionTerminatedException: when the connection is closed unexpectedly. BadOperationException: when client already terminated. 
""" return request.ws_stream.receive_message() def send_ping(request, body=''): request.ws_stream.send_ping(body) class MessageReceiver(threading.Thread): """This class receives messages from the client. This class provides three ways to receive messages: blocking, non-blocking, and via callback. Callback has the highest precedence. Note: This class should not be used with the standalone server for wss because pyOpenSSL used by the server raises a fatal error if the socket is accessed from multiple threads. """ def __init__(self, request, onmessage=None): """Construct an instance. Args: request: mod_python request. onmessage: a function to be called when a message is received. May be None. If not None, the function is called on another thread. In that case, MessageReceiver.receive and MessageReceiver.receive_nowait are useless because they will never return any messages. """ threading.Thread.__init__(self) self._request = request self._queue = Queue.Queue() self._onmessage = onmessage self._stop_requested = False self.setDaemon(True) self.start() def run(self): try: while not self._stop_requested: message = receive_message(self._request) if self._onmessage: self._onmessage(message) else: self._queue.put(message) finally: close_connection(self._request) def receive(self): """ Receive a message from the channel, blocking. Returns: message as a unicode string. """ return self._queue.get() def receive_nowait(self): """ Receive a message from the channel, non-blocking. Returns: message as a unicode string if available. None otherwise. """ try: message = self._queue.get_nowait() except Queue.Empty: message = None return message def stop(self): """Request to stop this instance. The instance will be stopped after receiving the next message. This method may not be very useful, but there is no clean way in Python to forcefully stop a running thread. """ self._stop_requested = True class MessageSender(threading.Thread): """This class sends messages to the client. This class provides both synchronous and asynchronous ways to send messages. Note: This class should not be used with the standalone server for wss because pyOpenSSL used by the server raises a fatal error if the socket is accessed from multiple threads. """ def __init__(self, request): """Construct an instance. Args: request: mod_python request. """ threading.Thread.__init__(self) self._request = request self._queue = Queue.Queue() self.setDaemon(True) self.start() def run(self): while True: message, condition = self._queue.get() condition.acquire() send_message(self._request, message) condition.notify() condition.release() def send(self, message): """Send a message, blocking.""" condition = threading.Condition() condition.acquire() self._queue.put((message, condition)) condition.wait() def send_nowait(self, message): """Send a message, non-blocking.""" self._queue.put((message, threading.Condition())) # vi:sts=4 sw=4 et
mpl-2.0
JacquesLucke/still-lambda
pyglet/gl/lib_wgl.py
41
5761
# ---------------------------------------------------------------------------- # pyglet # Copyright (c) 2006-2008 Alex Holkner # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in # the documentation and/or other materials provided with the # distribution. # * Neither the name of pyglet nor the names of its # contributors may be used to endorse or promote products # derived from this software without specific prior written # permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE # COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN # ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. # ---------------------------------------------------------------------------- ''' ''' __docformat__ = 'restructuredtext' __version__ = '$Id: lib_glx.py 597 2007-02-03 16:13:07Z Alex.Holkner $' import ctypes from ctypes import * import pyglet from pyglet.gl.lib import missing_function, decorate_function from pyglet.compat import asbytes __all__ = ['link_GL', 'link_GLU', 'link_WGL'] _debug_trace = pyglet.options['debug_trace'] gl_lib = ctypes.windll.opengl32 glu_lib = ctypes.windll.glu32 wgl_lib = gl_lib if _debug_trace: from pyglet.lib import _TraceLibrary gl_lib = _TraceLibrary(gl_lib) glu_lib = _TraceLibrary(glu_lib) wgl_lib = _TraceLibrary(wgl_lib) try: wglGetProcAddress = wgl_lib.wglGetProcAddress wglGetProcAddress.restype = CFUNCTYPE(POINTER(c_int)) wglGetProcAddress.argtypes = [c_char_p] _have_get_proc_address = True except AttributeError: _have_get_proc_address = False class WGLFunctionProxy(object): __slots__ = ['name', 'requires', 'suggestions', 'ftype', 'func'] def __init__(self, name, ftype, requires, suggestions): assert _have_get_proc_address self.name = name self.ftype = ftype self.requires = requires self.suggestions = suggestions self.func = None def __call__(self, *args, **kwargs): if self.func: return self.func(*args, **kwargs) from pyglet.gl import current_context if not current_context: raise Exception( 'Call to function "%s" before GL context created' % self.name) address = wglGetProcAddress(asbytes(self.name)) if cast(address, POINTER(c_int)): # check cast because address is func self.func = cast(address, self.ftype) decorate_function(self.func, self.name) else: self.func = missing_function( self.name, self.requires, self.suggestions) result = self.func(*args, **kwargs) return result def link_GL(name, restype, argtypes, requires=None, suggestions=None): try: func = getattr(gl_lib, name) func.restype = restype func.argtypes = argtypes decorate_function(func, name) return func except 
AttributeError: # Not in opengl32.dll. Try and get a pointer from WGL. try: fargs = (restype,) + tuple(argtypes) ftype = ctypes.WINFUNCTYPE(*fargs) if _have_get_proc_address: from pyglet.gl import gl_info if gl_info.have_context(): address = wglGetProcAddress(name) if address: func = cast(address, ftype) decorate_function(func, name) return func else: # Insert proxy until we have a context return WGLFunctionProxy(name, ftype, requires, suggestions) except: pass return missing_function(name, requires, suggestions) def link_GLU(name, restype, argtypes, requires=None, suggestions=None): try: func = getattr(glu_lib, name) func.restype = restype func.argtypes = argtypes decorate_function(func, name) return func except AttributeError: # Not in glu32.dll. Try and get a pointer from WGL. try: fargs = (restype,) + tuple(argtypes) ftype = ctypes.WINFUNCTYPE(*fargs) if _have_get_proc_address: from pyglet.gl import gl_info if gl_info.have_context(): address = wglGetProcAddress(name) if address: func = cast(address, ftype) decorate_function(func, name) return func else: # Insert proxy until we have a context return WGLFunctionProxy(name, ftype, requires, suggestions) except: pass return missing_function(name, requires, suggestions) link_WGL = link_GL
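# --- Hedged illustration (editor's addition, not part of the module above)
# of how these linkers are consumed elsewhere in pyglet: each GL entry point
# is declared once at import time and is either resolved from opengl32.dll
# immediately or wrapped in a WGLFunctionProxy until a GL context exists.
# The glClear prototype and constant below are the standard OpenGL ones; the
# local names are our own.
GL_COLOR_BUFFER_BIT = 0x00004000

_glClear = link_GL('glClear', None, [ctypes.c_uint])

def _clear_color_buffer():
    # glClear lives in opengl32.dll, so link_GL resolves it directly; it
    # still requires a current GL context at call time. Extension entry
    # points would instead go through wglGetProcAddress / WGLFunctionProxy.
    _glClear(GL_COLOR_BUFFER_BIT)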
bsd-3-clause
gioman/QGIS
python/plugins/processing/algs/qgis/ShortestPathPointToLayer.py
1
11506
# -*- coding: utf-8 -*- """ *************************************************************************** ShortestPathPointToLayer.py --------------------- Date : December 2016 Copyright : (C) 2016 by Alexander Bruy Email : alexander dot bruy at gmail dot com *************************************************************************** * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * *************************************************************************** """ __author__ = 'Alexander Bruy' __date__ = 'December 2016' __copyright__ = '(C) 2016, Alexander Bruy' # This will get replaced with a git SHA1 when you do a git archive __revision__ = '$Format:%H$' import os from collections import OrderedDict from qgis.PyQt.QtCore import QVariant from qgis.PyQt.QtGui import QIcon from qgis.core import (QgsWkbTypes, QgsUnitTypes, QgsFeature, QgsGeometry, QgsPoint, QgsFields, QgsField, QgsFeatureRequest, QgsMessageLog, QgsProcessingUtils) from qgis.analysis import (QgsVectorLayerDirector, QgsNetworkDistanceStrategy, QgsNetworkSpeedStrategy, QgsGraphBuilder, QgsGraphAnalyzer ) from qgis.utils import iface from processing.core.GeoAlgorithm import GeoAlgorithm from processing.core.parameters import (ParameterVector, ParameterPoint, ParameterNumber, ParameterString, ParameterTableField, ParameterSelection ) from processing.core.outputs import OutputVector from processing.tools import dataobjects pluginPath = os.path.split(os.path.split(os.path.dirname(__file__))[0])[0] class ShortestPathPointToLayer(GeoAlgorithm): INPUT_VECTOR = 'INPUT_VECTOR' START_POINT = 'START_POINT' END_POINTS = 'END_POINTS' STRATEGY = 'STRATEGY' DIRECTION_FIELD = 'DIRECTION_FIELD' VALUE_FORWARD = 'VALUE_FORWARD' VALUE_BACKWARD = 'VALUE_BACKWARD' VALUE_BOTH = 'VALUE_BOTH' DEFAULT_DIRECTION = 'DEFAULT_DIRECTION' SPEED_FIELD = 'SPEED_FIELD' DEFAULT_SPEED = 'DEFAULT_SPEED' TOLERANCE = 'TOLERANCE' OUTPUT_LAYER = 'OUTPUT_LAYER' def icon(self): return QIcon(os.path.join(pluginPath, 'images', 'networkanalysis.svg')) def group(self): return self.tr('Network analysis') def name(self): return 'shortestpathpointtolayer' def displayName(self): return self.tr('Shortest path (point to layer)') def defineCharacteristics(self): self.DIRECTIONS = OrderedDict([ (self.tr('Forward direction'), QgsVectorLayerDirector.DirectionForward), (self.tr('Backward direction'), QgsVectorLayerDirector.DirectionBackward), (self.tr('Both directions'), QgsVectorLayerDirector.DirectionBoth)]) self.STRATEGIES = [self.tr('Shortest'), self.tr('Fastest') ] self.addParameter(ParameterVector(self.INPUT_VECTOR, self.tr('Vector layer representing network'), [dataobjects.TYPE_VECTOR_LINE])) self.addParameter(ParameterPoint(self.START_POINT, self.tr('Start point'))) self.addParameter(ParameterVector(self.END_POINTS, self.tr('Vector layer with end points'), [dataobjects.TYPE_VECTOR_POINT])) self.addParameter(ParameterSelection(self.STRATEGY, self.tr('Path type to calculate'), self.STRATEGIES, default=0)) params = [] params.append(ParameterTableField(self.DIRECTION_FIELD, self.tr('Direction field'), self.INPUT_VECTOR, optional=True)) params.append(ParameterString(self.VALUE_FORWARD, self.tr('Value for forward direction'), '', optional=True)) params.append(ParameterString(self.VALUE_BACKWARD, self.tr('Value for backward direction'), '', optional=True))
params.append(ParameterString(self.VALUE_BOTH, self.tr('Value for both directions'), '', optional=True)) params.append(ParameterSelection(self.DEFAULT_DIRECTION, self.tr('Default direction'), list(self.DIRECTIONS.keys()), default=2)) params.append(ParameterTableField(self.SPEED_FIELD, self.tr('Speed field'), self.INPUT_VECTOR, optional=True)) params.append(ParameterNumber(self.DEFAULT_SPEED, self.tr('Default speed (km/h)'), 0.0, 99999999.999999, 5.0)) params.append(ParameterNumber(self.TOLERANCE, self.tr('Topology tolerance'), 0.0, 99999999.999999, 0.0)) for p in params: p.isAdvanced = True self.addParameter(p) self.addOutput(OutputVector(self.OUTPUT_LAYER, self.tr('Shortest path'), datatype=[dataobjects.TYPE_VECTOR_LINE])) def processAlgorithm(self, context, feedback): layer = QgsProcessingUtils.mapLayerFromString(self.getParameterValue(self.INPUT_VECTOR), context) startPoint = self.getParameterValue(self.START_POINT) endPoints = QgsProcessingUtils.mapLayerFromString(self.getParameterValue(self.END_POINTS), context) strategy = self.getParameterValue(self.STRATEGY) directionFieldName = self.getParameterValue(self.DIRECTION_FIELD) forwardValue = self.getParameterValue(self.VALUE_FORWARD) backwardValue = self.getParameterValue(self.VALUE_BACKWARD) bothValue = self.getParameterValue(self.VALUE_BOTH) defaultDirection = self.getParameterValue(self.DEFAULT_DIRECTION) speedFieldName = self.getParameterValue(self.SPEED_FIELD) defaultSpeed = self.getParameterValue(self.DEFAULT_SPEED) tolerance = self.getParameterValue(self.TOLERANCE) fields = QgsFields() fields.append(QgsField('start', QVariant.String, '', 254, 0)) fields.append(QgsField('end', QVariant.String, '', 254, 0)) fields.append(QgsField('cost', QVariant.Double, '', 20, 7)) feat = QgsFeature() feat.setFields(fields) writer = self.getOutputFromName( self.OUTPUT_LAYER).getVectorWriter(fields, QgsWkbTypes.LineString, layer.crs(), context) tmp = startPoint.split(',') startPoint = QgsPoint(float(tmp[0]), float(tmp[1])) directionField = -1 if directionFieldName is not None: directionField = layer.fields().lookupField(directionFieldName) speedField = -1 if speedFieldName is not None: speedField = layer.fields().lookupField(speedFieldName) director = QgsVectorLayerDirector(layer, directionField, forwardValue, backwardValue, bothValue, defaultDirection) distUnit = iface.mapCanvas().mapSettings().destinationCrs().mapUnits() multiplier = QgsUnitTypes.fromUnitToUnitFactor(distUnit, QgsUnitTypes.DistanceMeters) if strategy == 0: strategy = QgsNetworkDistanceStrategy() else: strategy = QgsNetworkSpeedStrategy(speedField, defaultSpeed, multiplier * 1000.0 / 3600.0) multiplier = 3600 director.addStrategy(strategy) builder = QgsGraphBuilder(iface.mapCanvas().mapSettings().destinationCrs(), True, tolerance) feedback.pushInfo(self.tr('Loading end points...')) request = QgsFeatureRequest() request.setFlags(request.flags() ^ QgsFeatureRequest.SubsetOfAttributes) features = QgsProcessingUtils.getFeatures(endPoints, context, request) count = QgsProcessingUtils.featureCount(endPoints, context) points = [startPoint] for f in features: points.append(f.geometry().asPoint()) feedback.pushInfo(self.tr('Building graph...')) snappedPoints = director.makeGraph(builder, points) feedback.pushInfo(self.tr('Calculating shortest paths...')) graph = builder.graph() idxStart = graph.findVertex(snappedPoints[0]) tree, cost = QgsGraphAnalyzer.dijkstra(graph,
idxStart, 0) route = [] total = 100.0 / count for i in range(1, count + 1): idxEnd = graph.findVertex(snappedPoints[i]) if tree[idxEnd] == -1: msg = self.tr('There is no route from start point ({}) to end point ({}).'.format(startPoint.toString(), points[i].toString())) feedback.setProgressText(msg) QgsMessageLog.logMessage(msg, self.tr('Processing'), QgsMessageLog.WARNING) continue cost = 0.0 current = idxEnd while current != idxStart: cost += graph.edge(tree[current]).cost(0) route.append(graph.vertex(graph.edge(tree[current]).inVertex()).point()) current = graph.edge(tree[current]).outVertex() route.append(snappedPoints[0]) route.reverse() geom = QgsGeometry.fromPolyline(route) feat.setGeometry(geom) feat['start'] = startPoint.toString() feat['end'] = points[i].toString() feat['cost'] = cost / multiplier writer.addFeature(feat) route[:] = [] feedback.setProgress(int(i * total)) del writer
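A minimal model of the backward walk performed in processAlgorithm above, using plain Python structures instead of the QGIS graph classes (the edge table and tree values here are hypothetical): QgsGraphAnalyzer.dijkstra returns a tree list mapping each vertex index to the id of the edge entering it on the shortest-path tree, or -1 when the vertex is unreachable, and the route is recovered by following outVertex links back to the start.
edges = {0: (0, 1, 2.0), 1: (1, 2, 3.0)}  # edge id -> (outVertex, inVertex, cost)
tree = [-1, 0, 1]                          # vertex index -> incoming edge id
def walk_back(end, start):
    cost, path, cur = 0.0, [end], end
    while cur != start:
        out_v, in_v, c = edges[tree[cur]]  # edge that enters cur on the tree
        cost += c
        cur = out_v                        # step toward the start vertex
        path.append(cur)
    path.reverse()
    return path, cost
print(walk_back(2, 0))  # -> ([0, 1, 2], 5.0)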
gpl-2.0
CamelBackNotation/hackdfw
Dependencies/build/lib.linux-x86_64-2.7/pymouse/mac.py
10
5547
#Copyright 2013 Paul Barton # #This program is free software: you can redistribute it and/or modify #it under the terms of the GNU General Public License as published by #the Free Software Foundation, either version 3 of the License, or #(at your option) any later version. # #This program is distributed in the hope that it will be useful, #but WITHOUT ANY WARRANTY; without even the implied warranty of #MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #GNU General Public License for more details. # #You should have received a copy of the GNU General Public License #along with this program. If not, see <http://www.gnu.org/licenses/>. import Quartz from AppKit import NSEvent, NSScreen from .base import PyMouseMeta, PyMouseEventMeta pressID = [None, Quartz.kCGEventLeftMouseDown, Quartz.kCGEventRightMouseDown, Quartz.kCGEventOtherMouseDown] releaseID = [None, Quartz.kCGEventLeftMouseUp, Quartz.kCGEventRightMouseUp, Quartz.kCGEventOtherMouseUp] class PyMouse(PyMouseMeta): def press(self, x, y, button=1): event = Quartz.CGEventCreateMouseEvent(None, pressID[button], (x, y), button - 1) Quartz.CGEventPost(Quartz.kCGHIDEventTap, event) def release(self, x, y, button=1): event = Quartz.CGEventCreateMouseEvent(None, releaseID[button], (x, y), button - 1) Quartz.CGEventPost(Quartz.kCGHIDEventTap, event) def move(self, x, y): move = Quartz.CGEventCreateMouseEvent(None, Quartz.kCGEventMouseMoved, (x, y), 0) Quartz.CGEventPost(Quartz.kCGHIDEventTap, move) def drag(self, x, y): drag = Quartz.CGEventCreateMouseEvent(None, Quartz.kCGEventLeftMouseDragged, (x, y), 0) Quartz.CGEventPost(Quartz.kCGHIDEventTap, drag) def position(self): loc = NSEvent.mouseLocation() return loc.x, Quartz.CGDisplayPixelsHigh(0) - loc.y def screen_size(self): return NSScreen.mainScreen().frame().size.width, NSScreen.mainScreen().frame().size.height def scroll(self, vertical=None, horizontal=None, depth=None): #Local submethod for generating Mac scroll events in one axis at a time def scroll_event(y_move=0, x_move=0, z_move=0, n=1): for _ in range(abs(n)): scrollWheelEvent = Quartz.CGEventCreateScrollWheelEvent( None, # No source Quartz.kCGScrollEventUnitLine, # Unit of measurement is lines 3, # Number of wheels(dimensions) y_move, x_move, z_move) Quartz.CGEventPost(Quartz.kCGHIDEventTap, scrollWheelEvent) #Execute vertical then horizontal then depth scrolling events if vertical is not None: vertical = int(vertical) if vertical == 0: # Do nothing with 0 distance pass elif vertical > 0: # Scroll up if positive scroll_event(y_move=1, n=vertical) else: # Scroll down if negative scroll_event(y_move=-1, n=abs(vertical)) if horizontal is not None: horizontal = int(horizontal) if horizontal == 0: # Do nothing with 0 distance pass elif horizontal > 0: # Scroll right if positive scroll_event(x_move=1, n=horizontal) else: # Scroll left if negative scroll_event(x_move=-1, n=abs(horizontal)) if depth is not None: depth = int(depth) if depth == 0: # Do nothing with 0 distance pass elif depth > 0: # Scroll "out" if positive scroll_event(z_move=1, n=depth) else: # Scroll "in" if negative scroll_event(z_move=-1, n=abs(depth)) class PyMouseEvent(PyMouseEventMeta): def run(self): tap = Quartz.CGEventTapCreate( Quartz.kCGSessionEventTap, Quartz.kCGHeadInsertEventTap, Quartz.kCGEventTapOptionDefault, Quartz.CGEventMaskBit(Quartz.kCGEventMouseMoved) | Quartz.CGEventMaskBit(Quartz.kCGEventLeftMouseDown) | Quartz.CGEventMaskBit(Quartz.kCGEventLeftMouseUp) | Quartz.CGEventMaskBit(Quartz.kCGEventRightMouseDown) |
Quartz.CGEventMaskBit(Quartz.kCGEventRightMouseUp) | Quartz.CGEventMaskBit(Quartz.kCGEventOtherMouseDown) | Quartz.CGEventMaskBit(Quartz.kCGEventOtherMouseUp), self.handler, None) loopsource = Quartz.CFMachPortCreateRunLoopSource(None, tap, 0) loop = Quartz.CFRunLoopGetCurrent() Quartz.CFRunLoopAddSource(loop, loopsource, Quartz.kCFRunLoopDefaultMode) Quartz.CGEventTapEnable(tap, True) while self.state: Quartz.CFRunLoopRunInMode(Quartz.kCFRunLoopDefaultMode, 5, False) def handler(self, proxy, type, event, refcon): (x, y) = Quartz.CGEventGetLocation(event) if type in pressID: self.click(x, y, pressID.index(type), True) elif type in releaseID: self.click(x, y, releaseID.index(type), False) else: self.move(x, y) if self.capture: Quartz.CGEventSetType(event, Quartz.kCGEventNull) return event
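A usage sketch for the PyMouse class above, assuming a macOS host with the Quartz bindings installed (the pymouse import path is how the package normally exposes this backend):
from pymouse import PyMouse  # resolves to the mac backend on OS X
m = PyMouse()
w, h = m.screen_size()
m.move(int(w / 2), int(h / 2))  # centre the pointer
m.press(100, 100, button=1)     # left button down...
m.release(100, 100, button=1)   # ...and up again, i.e. one click
m.scroll(vertical=-3)           # three line-scrolls down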
mit
BerserkerTroll/root
bindings/pyroot/JupyROOT/utils.py
6
14510
# -*- coding:utf-8 -*- #----------------------------------------------------------------------------- # Author: Danilo Piparo <[email protected]> CERN #----------------------------------------------------------------------------- from __future__ import print_function import os import sys import select import tempfile import pty import itertools import re import fnmatch import time from hashlib import sha1 from contextlib import contextmanager from subprocess import check_output from IPython import get_ipython from IPython.display import HTML from IPython.core.extensions import ExtensionManager import IPython.display import ROOT from JupyROOT import handlers # We want iPython to take over the graphics ROOT.gROOT.SetBatch() cppMIME = 'text/x-c++src' _jsMagicHighlight = """ Jupyter.CodeCell.options_default.highlight_modes['magic_{cppMIME}'] = {{'reg':[/^%%cpp/]}}; console.log("JupyROOT - %%cpp magic configured"); """ _jsNotDrawableClassesPatterns = ["TGraph[23]D","TH3*","TGraphPolar","TProf*","TEve*","TF[23]","TGeo*","TPolyLine3D", "TH2Poly"] _jsROOTSourceDir = "https://root.cern.ch/js/notebook/" _jsCanvasWidth = 800 _jsCanvasHeight = 600 _jsCode = """ <div id="{jsDivId}" style="width: {jsCanvasWidth}px; height: {jsCanvasHeight}px"> </div> <script> requirejs.config({{ paths: {{ 'JSRootCore' : '{jsROOTSourceDir}/scripts/JSRootCore', }} }}); require(['JSRootCore'], function(Core) {{ var obj = Core.JSONR_unref({jsonContent}); Core.draw("{jsDivId}", obj, "{jsDrawOptions}"); }} ); </script> """ TBufferJSONErrorMessage="The TBufferJSON class is necessary for JS visualisation to work and cannot be found. Did you enable the http module (-D http=ON for CMake)?" def TBufferJSONAvailable(): if hasattr(ROOT,"TBufferJSON"): return True print(TBufferJSONErrorMessage, file=sys.stderr) return False _enableJSVis = False _enableJSVisDebug = False def enableJSVis(): if not TBufferJSONAvailable(): return global _enableJSVis _enableJSVis = True def disableJSVis(): global _enableJSVis _enableJSVis = False def enableJSVisDebug(): if not TBufferJSONAvailable(): return global _enableJSVis global _enableJSVisDebug _enableJSVis = True _enableJSVisDebug = True def disableJSVisDebug(): global _enableJSVis global _enableJSVisDebug _enableJSVis = False _enableJSVisDebug = False def _getPlatform(): return sys.platform def _getLibExtension(thePlatform): '''Return appropriate file extension for a shared library >>> _getLibExtension('darwin') '.dylib' >>> _getLibExtension('win32') '.dll' >>> _getLibExtension('OddPlatform') '.so' ''' pExtMap = { 'darwin' : '.dylib', 'win32' : '.dll' } return pExtMap.get(thePlatform, '.so') def welcomeMsg(): print("Welcome to JupyROOT %s" %ROOT.gROOT.GetVersion()) @contextmanager def _setIgnoreLevel(level): originalLevel = ROOT.gErrorIgnoreLevel ROOT.gErrorIgnoreLevel = level yield ROOT.gErrorIgnoreLevel = originalLevel def commentRemover( text ): ''' >>> s="// hello" >>> commentRemover(s) '' >>> s="int /** Test **/ main() {return 0;}" >>> commentRemover(s) 'int main() {return 0;}' ''' def blotOutNonNewlines( strIn ) : # Return a string containing only the newline chars contained in strIn return "" + ("\n" * strIn.count('\n')) def replacer( match ) : s = match.group(0) if s.startswith('/'): # Matched string is //...EOL or /*...*/ ==> Blot out all non-newline chars return blotOutNonNewlines(s) else: # Matched string is '...' or "..." 
==> Keep unchanged return s pattern = re.compile(\ r'//.*?$|/\*.*?\*/|\'(?:\\.|[^\\\'])*\'|"(?:\\.|[^\\"])*"', re.DOTALL | re.MULTILINE) return re.sub(pattern, replacer, text) # Here functions are defined to process C++ code def processCppCodeImpl(code): #code = commentRemover(code) ROOT.gInterpreter.ProcessLine(code) def processMagicCppCodeImpl(code): err = ROOT.ProcessLineWrapper(code) if err == ROOT.TInterpreter.kProcessing: ROOT.gInterpreter.ProcessLine('.@') ROOT.gInterpreter.ProcessLine('cerr << "Unbalanced braces. This cell was not processed." << endl;') def declareCppCodeImpl(code): #code = commentRemover(code) ROOT.gInterpreter.Declare(code) def processCppCode(code): processCppCodeImpl(code) def processMagicCppCode(code): processMagicCppCodeImpl(code) def declareCppCode(code): declareCppCodeImpl(code) def _checkOutput(command,errMsg=None): out = "" try: out = check_output(command.split()) except: if errMsg: sys.stderr.write("%s (command was %s)\n" %(errMsg,command)) return out def _invokeAclicMac(fileName): '''FIXME! This function is a workaround. On osx, it is impossible to link against libzmq.so, among the others. The error is known and is "ld: can't link with bundle (MH_BUNDLE) only dylibs (MH_DYLIB)" We cannot at the moment force Aclic to change the linker command in order to exclude these libraries, so we launch a second root session to compile the library, which we then load. ''' command = 'root -l -q -b -e gSystem->CompileMacro(\"%s\",\"k\")*0'%fileName out = _checkOutput(command, "Error invoking ACLiC") libNameBase = fileName.replace(".C","_C") ROOT.gSystem.Load(libNameBase) def _codeToFilename(code): '''Convert code to a unique file name >>> _codeToFilename("int f(i){return i*i;}") 'dbf7e731.C' ''' fileNameBase = sha1(code.encode('utf-8')).hexdigest()[0:8] return fileNameBase + ".C" def _dumpToUniqueFile(code): '''Dump code to file whose name is unique >>> _codeToFilename("int f(i){return i*i;}") 'dbf7e731.C' ''' fileName = _codeToFilename(code) with open (fileName,'w') as ofile: ofile.write(code) return fileName def isPlatformApple(): return _getPlatform() == 'darwin' def invokeAclic(cell): fileName = _dumpToUniqueFile(cell) if isPlatformApple(): _invokeAclicMac(fileName) else: processCppCode(".L %s+" %fileName) transformers = [] class StreamCapture(object): def __init__(self, ip=get_ipython()): # For the registration self.shell = ip self.ioHandler = handlers.IOHandler() self.flag = True self.outString = "" self.errString = "" self.asyncCapturer = handlers.Runner(self.syncCapture) self.isFirstPreExecute = True self.isFirstPostExecute = True def syncCapture(self, defout = ''): self.outString = defout self.errString = defout waitTimes = [.01, .01, .02, .04, .06, .08, .1] lenWaitTimes = 7 iterIndex = 0 while self.flag: self.ioHandler.Poll() if not self.flag: return waitTime = .1 if iterIndex >= lenWaitTimes else waitTimes[iterIndex] time.sleep(waitTime) def pre_execute(self): if self.isFirstPreExecute: self.isFirstPreExecute = False return 0 self.flag = True self.ioHandler.Clear() self.ioHandler.InitCapture() self.asyncCapturer.AsyncRun('') def post_execute(self): if self.isFirstPostExecute: self.isFirstPostExecute = False self.isFirstPreExecute = False return 0 self.flag = False self.asyncCapturer.Wait() self.ioHandler.Poll() self.ioHandler.EndCapture() # Print for the notebook out = self.ioHandler.GetStdout() err = self.ioHandler.GetStderr() if not transformers: sys.stdout.write(out) sys.stderr.write(err) else: for t in transformers: (out, err, otype) = t(out, err) if
otype == 'html': IPython.display.display(HTML(out)) IPython.display.display(HTML(err)) return 0 def register(self): self.shell.events.register('pre_execute', self.pre_execute) self.shell.events.register('post_execute', self.post_execute) def GetCanvasDrawers(): lOfC = ROOT.gROOT.GetListOfCanvases() return [NotebookDrawer(can) for can in lOfC if can.IsDrawn()] def GetGeometryDrawer(): if not hasattr(ROOT,'gGeoManager'): return if not ROOT.gGeoManager: return if not ROOT.gGeoManager.GetUserPaintVolume(): return vol = ROOT.gGeoManager.GetTopVolume() if vol: return NotebookDrawer(vol) def GetDrawers(): drawers = GetCanvasDrawers() geometryDrawer = GetGeometryDrawer() if geometryDrawer: drawers.append(geometryDrawer) return drawers def DrawGeometry(): drawer = GetGeometryDrawer() if drawer: drawer.Draw() def DrawCanvases(): drawers = GetCanvasDrawers() for drawer in drawers: drawer.Draw() def NotebookDraw(): DrawGeometry() DrawCanvases() class CaptureDrawnPrimitives(object): ''' Capture the canvas which is drawn to display it. ''' def __init__(self, ip=get_ipython()): self.shell = ip def _post_execute(self): NotebookDraw() def register(self): self.shell.events.register('post_execute', self._post_execute) class NotebookDrawer(object): ''' Capture the canvas which is drawn and decide if it should be displayed using jsROOT. ''' jsUID = 0 def __init__(self, theObject): self.drawableObject = theObject self.isCanvas = self.drawableObject.ClassName() == "TCanvas" def __del__(self): if self.isCanvas: self.drawableObject.ResetDrawn() else: ROOT.gGeoManager.SetUserPaintVolume(None) def _getListOfPrimitivesNamesAndTypes(self): """ Get the list of primitives in the pad, recursively descending into histograms and graphs looking for fitted functions. """ primitives = self.drawableObject.GetListOfPrimitives() primitivesNames = map(lambda p: p.ClassName(), primitives) return sorted(primitivesNames) def _getUID(self): ''' Every DIV containing a JavaScript snippet must be unique in the notebook. This methods provides a unique identifier. ''' NotebookDrawer.jsUID += 1 return NotebookDrawer.jsUID def _canJsDisplay(self): if not TBufferJSONAvailable(): return False if not self.isCanvas: return True # to be optimised if not _enableJSVis: return False primitivesTypesNames = self._getListOfPrimitivesNamesAndTypes() for unsupportedPattern in _jsNotDrawableClassesPatterns: for primitiveTypeName in primitivesTypesNames: if fnmatch.fnmatch(primitiveTypeName,unsupportedPattern): print("The canvas contains an object of a type jsROOT cannot currently handle (%s). Falling back to a static png." 
%primitiveTypeName, file=sys.stderr) return False return True def _getJsCode(self): # Workaround to have ConvertToJSON work json = ROOT.TBufferJSON.ConvertToJSON(self.drawableObject, 3) # Here we could optimise the string manipulation divId = 'root_plot_' + str(self._getUID()) height = _jsCanvasHeight width = _jsCanvasWidth options = "all" if self.isCanvas: width = self.drawableObject.GetWw() height = self.drawableObject.GetWh() options = "" thisJsCode = _jsCode.format(jsCanvasWidth = width, jsCanvasHeight = height, jsROOTSourceDir = _jsROOTSourceDir, jsonContent = json.Data(), jsDrawOptions = options, jsDivId = divId) return thisJsCode def _getJsDiv(self): return HTML(self._getJsCode()) def _jsDisplay(self): IPython.display.display(self._getJsDiv()) return 0 def _getPngImage(self): ofile = tempfile.NamedTemporaryFile(suffix=".png") with _setIgnoreLevel(ROOT.kError): self.drawableObject.SaveAs(ofile.name) img = IPython.display.Image(filename=ofile.name, format='png', embed=True) return img def _pngDisplay(self): img = self._getPngImage() IPython.display.display(img) def _display(self): if _enableJSVisDebug: self._pngDisplay() self._jsDisplay() else: if self._canJsDisplay(): self._jsDisplay() else: self._pngDisplay() def GetDrawableObjects(self): if not self.isCanvas: return [self._getJsDiv()] if _enableJSVisDebug: return [self._getJsDiv(),self._getPngImage()] if self._canJsDisplay(): return [self._getJsDiv()] else: return [self._getPngImage()] def Draw(self): self._display() return 0 def setStyle(): style=ROOT.gStyle style.SetFuncWidth(2) captures = [] def loadMagicsAndCapturers(): global captures extNames = ["JupyROOT.magics." + name for name in ["cppmagic","jsrootmagic"]] ip = get_ipython() extMgr = ExtensionManager(ip) for extName in extNames: extMgr.load_extension(extName) captures.append(StreamCapture()) captures.append(CaptureDrawnPrimitives()) for capture in captures: capture.register() def declareProcessLineWrapper(): ROOT.gInterpreter.Declare(""" TInterpreter::EErrorCode ProcessLineWrapper(const char* line) { TInterpreter::EErrorCode err; gInterpreter->ProcessLine(line, &err); return err; } """) def enhanceROOTModule(): ROOT.enableJSVis = enableJSVis ROOT.disableJSVis = disableJSVis ROOT.enableJSVisDebug = enableJSVisDebug ROOT.disableJSVisDebug = disableJSVisDebug def enableCppHighlighting(): ipDispJs = IPython.display.display_javascript # Define highlight mode for %%cpp magic ipDispJs(_jsMagicHighlight.format(cppMIME = cppMIME), raw=True) def iPythonize(): setStyle() loadMagicsAndCapturers() declareProcessLineWrapper() #enableCppHighlighting() enhanceROOTModule() welcomeMsg()
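The transformers hook used in StreamCapture.post_execute above takes the captured stdout/stderr strings and returns an (out, err, otype) triple; when otype is 'html' the strings are rendered with IPython.display.HTML instead of being written to the plain streams. A hypothetical transformer, shown only as a sketch:
# Hypothetical example: wrap captured output in <pre> tags and render as HTML.
def pre_tag_transformer(out, err):
    return ('<pre>%s</pre>' % out, '<pre>%s</pre>' % err, 'html')
transformers.append(pre_tag_transformer)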
lgpl-2.1
IsCoolEntertainment/debpkg_python-boto
boto/ec2/elb/securitygroup.py
57
1576
# Copyright (c) 2010 Reza Lotun http://reza.lotun.name # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, dis- # tribute, sublicense, and/or sell copies of the Software, and to permit # persons to whom the Software is furnished to do so, subject to the fol- # lowing conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. class SecurityGroup(object): def __init__(self, connection=None): self.name = None self.owner_alias = None def __repr__(self): return 'SecurityGroup(%s, %s)' % (self.name, self.owner_alias) def startElement(self, name, attrs, connection): pass def endElement(self, name, value, connection): if name == 'GroupName': self.name = value elif name == 'OwnerAlias': self.owner_alias = value
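A hand-driven sketch of how boto's SAX-style parser exercises this class while walking a DescribeLoadBalancers response (the element values here are made up):
sg = SecurityGroup()
sg.endElement('GroupName', 'my-elb-sg', None)   # parser reports each closing tag
sg.endElement('OwnerAlias', 'amazon-elb', None)
print(sg)  # SecurityGroup(my-elb-sg, amazon-elb)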
mit
eoncloud-dev/eonboard
eoncloud_web/cloud/api/swift.py
10
11697
# Copyright 2012 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Copyright 2012 Nebula, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import logging import six.moves.urllib.parse as urlparse import swiftclient from django.conf import settings from django.utils.translation import ugettext_lazy as _ from horizon import exceptions from horizon.utils.memoized import memoized # noqa from openstack_dashboard.api import base from openstack_dashboard.openstack.common import timeutils LOG = logging.getLogger(__name__) FOLDER_DELIMITER = "/" # Swift ACL GLOBAL_READ_ACL = ".r:*" LIST_CONTENTS_ACL = ".rlistings" class Container(base.APIDictWrapper): pass class StorageObject(base.APIDictWrapper): def __init__(self, apidict, container_name, orig_name=None, data=None): super(StorageObject, self).__init__(apidict) self.container_name = container_name self.orig_name = orig_name self.data = data @property def id(self): return self.name class PseudoFolder(base.APIDictWrapper): def __init__(self, apidict, container_name): super(PseudoFolder, self).__init__(apidict) self.container_name = container_name @property def id(self): return '%s/%s' % (self.container_name, self.name) @property def name(self): return self.subdir.rstrip(FOLDER_DELIMITER) @property def bytes(self): return None @property def content_type(self): return "application/pseudo-folder" def _objectify(items, container_name): """Splits a listing of objects into their appropriate wrapper classes.""" objects = [] # Deal with objects and object pseudo-folders first, save subdirs for later for item in items: if item.get("subdir", None) is not None: object_cls = PseudoFolder else: object_cls = StorageObject objects.append(object_cls(item, container_name)) return objects def _metadata_to_header(metadata): headers = {} public = metadata.get('is_public') if public is True: public_container_acls = [GLOBAL_READ_ACL, LIST_CONTENTS_ACL] headers['x-container-read'] = ",".join(public_container_acls) elif public is False: headers['x-container-read'] = "" return headers @memoized def swift_api(request): endpoint = base.url_for(request, 'object-store') cacert = getattr(settings, 'OPENSTACK_SSL_CACERT', None) insecure = getattr(settings, 'OPENSTACK_SSL_NO_VERIFY', False) LOG.debug('Swift connection created using token "%s" and url "%s"' % (request.user.token.id, endpoint)) return swiftclient.client.Connection(None, request.user.username, None, preauthtoken=request.user.token.id, preauthurl=endpoint, cacert=cacert, insecure=insecure, auth_version="2.0") def swift_container_exists(request, container_name): try: swift_api(request).head_container(container_name) return True except swiftclient.client.ClientException: return False def swift_object_exists(request, container_name, object_name): try: swift_api(request).head_object(container_name, object_name) return True except swiftclient.client.ClientException: return False def swift_get_containers(request, marker=None): limit = 
getattr(settings, 'API_RESULT_LIMIT', 1000) headers, containers = swift_api(request).get_account(limit=limit + 1, marker=marker, full_listing=True) container_objs = [Container(c) for c in containers] if(len(container_objs) > limit): return (container_objs[0:-1], True) else: return (container_objs, False) def swift_get_container(request, container_name, with_data=True): if with_data: headers, data = swift_api(request).get_object(container_name, "") else: data = None headers = swift_api(request).head_container(container_name) timestamp = None is_public = False public_url = None try: is_public = GLOBAL_READ_ACL in headers.get('x-container-read', '') if is_public: swift_endpoint = base.url_for(request, 'object-store', endpoint_type='publicURL') public_url = swift_endpoint + '/' + urlparse.quote(container_name) ts_float = float(headers.get('x-timestamp')) timestamp = timeutils.iso8601_from_timestamp(ts_float) except Exception: pass container_info = { 'name': container_name, 'container_object_count': headers.get('x-container-object-count'), 'container_bytes_used': headers.get('x-container-bytes-used'), 'timestamp': timestamp, 'data': data, 'is_public': is_public, 'public_url': public_url, } return Container(container_info) def swift_create_container(request, name, metadata=({})): if swift_container_exists(request, name): raise exceptions.AlreadyExists(name, 'container') headers = _metadata_to_header(metadata) swift_api(request).put_container(name, headers=headers) return Container({'name': name}) def swift_update_container(request, name, metadata=({})): headers = _metadata_to_header(metadata) swift_api(request).post_container(name, headers=headers) return Container({'name': name}) def swift_delete_container(request, name): # It cannot be deleted if it's not empty. The batch removal of objects # should be done in swiftclient instead of Horizon. objects, more = swift_get_objects(request, name) if objects: error_msg = unicode(_("The container cannot be deleted " "since it's not empty.")) exc = exceptions.Conflict(error_msg) exc._safe_message = error_msg raise exc swift_api(request).delete_container(name) return True def swift_get_objects(request, container_name, prefix=None, marker=None, limit=None): limit = limit or getattr(settings, 'API_RESULT_LIMIT', 1000) kwargs = dict(prefix=prefix, marker=marker, limit=limit + 1, delimiter=FOLDER_DELIMITER, full_listing=True) headers, objects = swift_api(request).get_container(container_name, **kwargs) object_objs = _objectify(objects, container_name) if(len(object_objs) > limit): return (object_objs[0:-1], True) else: return (object_objs, False) def swift_filter_objects(request, filter_string, container_name, prefix=None, marker=None): # FIXME(kewu): Swift currently has no real filtering API, thus the marker # parameter here won't actually help the pagination. For now I am just # getting the largest number of objects from a container and filtering # based on those objects.
limit = 9999 objects = swift_get_objects(request, container_name, prefix=prefix, marker=marker, limit=limit) filter_string_list = filter_string.lower().strip().split(' ') def matches_filter(obj): for q in filter_string_list: return wildcard_search(obj.name.lower(), q) return filter(matches_filter, objects[0]) def wildcard_search(string, q): q_list = q.split('*') if all(map(lambda x: x == '', q_list)): return True elif q_list[0] not in string: return False else: if q_list[0] == '': tail = string else: head, delimiter, tail = string.partition(q_list[0]) return wildcard_search(tail, '*'.join(q_list[1:])) def swift_copy_object(request, orig_container_name, orig_object_name, new_container_name, new_object_name): if swift_object_exists(request, new_container_name, new_object_name): raise exceptions.AlreadyExists(new_object_name, 'object') headers = {"X-Copy-From": FOLDER_DELIMITER.join([orig_container_name, orig_object_name])} return swift_api(request).put_object(new_container_name, new_object_name, None, headers=headers) def swift_upload_object(request, container_name, object_name, object_file=None): headers = {} size = 0 if object_file: headers['X-Object-Meta-Orig-Filename'] = object_file.name size = object_file.size etag = swift_api(request).put_object(container_name, object_name, object_file, headers=headers) obj_info = {'name': object_name, 'bytes': size, 'etag': etag} return StorageObject(obj_info, container_name) def swift_create_pseudo_folder(request, container_name, pseudo_folder_name): headers = {} etag = swift_api(request).put_object(container_name, pseudo_folder_name, None, headers=headers) obj_info = { 'name': pseudo_folder_name, 'etag': etag } return PseudoFolder(obj_info, container_name) def swift_delete_object(request, container_name, object_name): swift_api(request).delete_object(container_name, object_name) return True def swift_get_object(request, container_name, object_name, with_data=True): if with_data: headers, data = swift_api(request).get_object(container_name, object_name) else: data = None headers = swift_api(request).head_object(container_name, object_name) orig_name = headers.get("x-object-meta-orig-filename") timestamp = None try: ts_float = float(headers.get('x-timestamp')) timestamp = timeutils.iso8601_from_timestamp(ts_float) except Exception: pass obj_info = { 'name': object_name, 'bytes': headers.get('content-length'), 'content_type': headers.get('content-type'), 'etag': headers.get('etag'), 'timestamp': timestamp, } return StorageObject(obj_info, container_name, orig_name=orig_name, data=data)
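A behaviour sketch for wildcard_search above (file names and patterns are illustrative): the pattern is split on '*' and each literal fragment must occur, in order, in the string.
assert wildcard_search('backup-2015.tar', 'backup*tar')
assert wildcard_search('backup-2015.tar', '*2015*')
assert not wildcard_search('backup-2015.tar', '2016*')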
apache-2.0
arcsun/neteaseMenu
start.py
1
11671
#coding=utf-8 from flask import Flask, redirect, render_template, request, Response from codepy import menulog import anydbm as dbm import shelve import os, sys import urllib from datetime import datetime import time import urllib2 import hashlib app = Flask(__name__) visit = 0 visitHome = 0 startTime = time.time() token = 'hzsunzhengyu' # token of the WeChat official account, set your own cache = {} s = None def checkSign(signature, timestamp, nonce): # WeChat signature check args = [] args.append("token=%s" % token) args.append("timestamp=%s" % timestamp) args.append("nonce=%s" % nonce) args = sorted(args) raw = "&".join(args) sign = hashlib.sha1(raw).hexdigest() menulog.info(signature) menulog.info(sign) return signature == sign def saveCache(key, content): """ Pages now have to be fetched via the server, so keep a simple cache """ if len(cache) >= 10: cache.clear() cache[key] = content def addOne(page= 1): """Visit counter""" try: if not s: globals()['s'] = shelve.open('visit_count.dat', writeback=True) if page == 0: s['count_home'] = 0 if s.get('count_home') is None else s['count_home']+1 elif page == 1: s['count_menu'] = 0 if s.get('count_menu') is None else s['count_menu']+1 s.sync() except Exception as e: menulog.debug(e) @app.route('/menu/cache') def getCache(): return str(cache.keys()) def getWebContent(url): try: fname = url.split('?')[1].replace('=', '_') if cache.get(fname): return cache.get(fname) else: req = urllib2.Request(url+ '&companyId=1') # update: this parameter was added req.add_header('User-Agent', 'Mozilla/5.0 (Linux; Android 6.0; PRO 6 Build/MRA58K; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/44.0.2403.130 Mobile Safari/537.36 YiXin/4.8.3') res = urllib2.urlopen(req) html = res.read().decode('utf-8') saveCache(fname, html) return html except Exception as e: menulog.debug(str(e)) return '' @app.route('/') def hello_world(): return redirect('/menu') @app.route('/menus/sign') def weixin_sign(): # WeChat endpoint verification menulog.info('weixin sign') signature = request.args.get('signature', '') timestamp = request.args.get('timestamp', '') nonce = request.args.get('nonce', '') echostr = request.args.get('echostr', '') valid = checkSign(signature, timestamp, nonce) if valid: return echostr else: # the signature check currently has a bug, so always return success for now return echostr @app.route('/menu/<int:day>', methods = ['GET', 'POST']) def menu(day=0): # 0 = today, 1 = tomorrow, 151202 = a specific date if request.method == 'POST': day = int(request.form['day']) # update: Yixin now filters on User-Agent, so the relayed endpoint must be used return redirect('/menus/%s'% day) # from codepy import menu # globals()['visit'] += 1 # menulog.info(u'访问菜单@%s'% visit) # url = menu.Menu(day).process() # if url.startswith('http'): # return redirect(url) # else: # return url @app.route('/menus/<int:day>', methods = ['GET', 'POST']) def menus(day=0): # added to work around redirects hanging inside WeChat # the server fetches the page from Yixin and returns it to the user from codepy import menu if request.method == 'POST': day = int(request.form['day']) addOne(1) globals()['visit'] += 1 menulog.info(u'访问菜单@%s'% visit) url = menu.Menu(day).process() if url.startswith('http'): return getWebContent(url) else: return url @app.route('/menus/bus') def bus(): # shuttle bus route page, relayed through the server addOne(1) globals()['visit'] += 1 menulog.info(u'访问菜单@%s'% visit) url = "http://numenplus.yixin.im/multiNewsWap.do?multiNewsId=17011" # updated very rarely, maintained by hand for now try: return getWebContent(url) except: return u'网页访问出错' def getWeekDayFromDay(daytime): """Work out the weekday from a date such as 20160517""" try: daytime = '20'+ str(daytime) # '20160517' year = int(daytime[:4]) # 2016 month = int(daytime[4:6]) # 5 day = int(daytime[6:8]) # 17 weekday = datetime(year, month, day, 0, 0, 0, 0).weekday() weekdaynames= { 0: u'星期一', 1: u'星期二', 2: u'星期三', 3: u'星期四', 4: u'星期五', 5: u'星期六',
6: u'星期日', } return weekdaynames.get(weekday, u'') except: menulog.debug(u'获取星期几错误') return u'' @app.route('/menu') def menuList(): addOne(0) globals()['visitHome'] += 1 menulog.info(u'访问主页@%s'% visitHome) try: db = dbm.open('datafile', 'c') cache = eval(db['cache']) future = eval(db['future']) maybe = eval(db['maybe']) maybe.sort() vals = {} for day in future: vals[day] = cache[day] db.close() weekdays = {} for day in vals.keys(): weekdays[day] = getWeekDayFromDay(day) return render_template('menu.html', vals= vals, days= future, weekdays= weekdays, maybe= maybe, total=(s.get('count_menu'), s.get('count_home'))) except (IOError, KeyError): msg = u'缓存读取错误' menulog.info(msg) return msg @app.route('/menu/manage/hzmenu') def manage(): seconds = int(time.time()- startTime) days = seconds/(24*60*60) if days >= 1: seconds -= 24*60*60*days hours = seconds/(60*60) if hours >= 1: seconds -= 60*60*hours minutes = seconds/60 if minutes >= 1: seconds -= 60*minutes timestr = u'本次已运行:%s天%s小时%s分钟%s秒'% (days, hours, minutes, seconds) return render_template('manage.html', visit= visit, visitHome= visitHome, timestr= timestr, total=(s.get('count_menu'), s.get('count_home'))) @app.route('/menu/info') def info(): try: db = dbm.open('datafile', 'r') msg = str(db) db.close() return msg except (IOError, KeyError): return u'缓存读取错误' @app.route('/menu/delete/<int:day>', methods = ['GET', 'POST']) def delete(day= 150101): try: db = dbm.open('datafile', 'w') if request.method == 'POST': day = int(request.form['day']) cache = eval(db['cache']) if cache.has_key(day): del cache[day] msg = u'删除%s'% day else: msg = u'del key not found' menulog.info(msg) db['cache'] = str(cache) db.close() return msg except (IOError, KeyError): return u'缓存读取错误' @app.route('/menu/delfuture/<int:day>', methods = ['GET', 'POST']) def delfuture(day= 161300): try: db = dbm.open('datafile', 'w') if request.method == 'POST': day = int(request.form['day']) future = eval(db['future']) if day in future: future.remove(day) msg = u'删除%s'% day else: msg = u'del key not found' menulog.info(msg) db['future'] = str(future) db.close() delete(day) return msg except (IOError, KeyError) as e: print e return u'缓存读取错误' @app.route('/menu/refreshlist') def refreshlist(): try: db = dbm.open('datafile', 'w') cache = eval(db['cache']) future = [] today = int(time.strftime('%y%m%d',time.localtime(time.time()))) for day in cache.keys(): if day >= today: future.append(day) future.sort() db['future'] = str(future) msg = u'更新%s后已找到的菜单列表 from homepage'% today menulog.info(msg) db.close() return msg except (IOError, KeyError): return u'缓存读取错误' @app.route('/menu/clear') def clearMaybe(): # empty the list of probable menus (maybe=[]) try: db = dbm.open('datafile', 'w') db['maybe'] = '[]' db.close() msg = u'清空maybe' menulog.info(msg) return msg except (IOError, KeyError): msg = u'缓存读取错误' menulog.info(msg) return msg @app.route('/menu/start/<int:startid>', methods = ['GET', 'POST']) def start(startid= 17000): # set the search start point to the given value try: if request.method == 'POST': startid = int(request.form['startid']) db = dbm.open('datafile', 'w') db['startId'] = str(startid) db.close() msg = u'设置查找起点ID为:%d'% startid menulog.info(msg) return msg except (IOError, KeyError): msg = u'缓存/POST参数读取错误' menulog.info(msg) return msg @app.route('/menu/add/<int:day>/<int:mid>', methods = ['GET', 'POST']) def add(day= 151203, mid= 17063): # manually add a menu (occasionally the publisher enters a wrong date) try: db = dbm.open('datafile', 'w') cache = eval(db['cache']) if request.method == 'POST': day = int(request.form['day']) mid = int(request.form['mid']) cache[day] = mid
db['cache'] = str(cache) msg = u'更新%s的菜单id为%s'% (day, mid) menulog.info(msg) db.close() return msg except (IOError, KeyError): msg = u'缓存/POST参数读取错误' menulog.info(msg) return msg @app.route('/menu/log/<int:lines>') def readLog(lines= 0): # how many log lines to read, 0 means all f = None try: files = os.listdir('./') files.sort() logs = [] for fname in files: if fname.startswith('menu.log'): logs.append(fname) if logs: f = open(logs[-1]) contents = f.readlines() content = '' if lines == 0: lines = len(contents) line = 0 for msg in reversed(contents): line += 1 if line < lines: content += msg+ '<br>' else: break return content.decode('utf-8') else: return u'暂无日志' except IOError: return '读取日志出错' finally: if f: f.close() @app.route('/api/v1/verify', methods=['POST', 'GET']) def mockYidun(): resp = Response('{"msg":"success","result":true,"c":1,"error":0}') resp.headers['Content-Type'] = 'application/json;charset=UTF-8' return resp @app.route('/api/v2/verify', methods=['POST', 'GET']) def mockYidun2(): resp = Response('{"msg":"success","result":true,"c":1,"error":0}') resp.headers['Content-Type'] = 'application/json;charset=UTF-8' return resp if __name__ == '__main__': if sys.platform.startswith('win'): # local debugging # import webbrowser # webbrowser.open('http://127.0.0.1:80/menu') app.run(host='127.0.0.1', port= 80, debug= True) elif len(sys.argv)> 1: # online debugging, pass any extra argument app.run(host='0.0.0.0', port= 5000, debug= True) else: # production, started via gunicorn from werkzeug.contrib.fixers import ProxyFix app.wsgi_app = ProxyFix(app.wsgi_app) app.run(host='0.0.0.0', port= 5000)
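For reference, checkSign above hashes sorted 'key=value' pairs, while the standard WeChat verification, as the platform documents it, sorts the bare token/timestamp/nonce values and concatenates them without separators before SHA-1 hashing; that difference is likely the bug the handler's comment mentions. A hedged sketch of the standard computation:
import hashlib
def wechat_signature(token, timestamp, nonce):
    # Standard scheme: sort the three raw values, join, SHA-1.
    raw = ''.join(sorted([token, timestamp, nonce]))
    return hashlib.sha1(raw).hexdigest()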
apache-2.0
gregdek/ansible
lib/ansible/modules/network/fortios/fortios_firewall_address.py
7
15896
#!/usr/bin/python from __future__ import (absolute_import, division, print_function) # Copyright 2018 Fortinet, Inc. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <https://www.gnu.org/licenses/>. # # the lib uses python logging; you can get the logs if the following is set in your # Ansible config. __metaclass__ = type ANSIBLE_METADATA = {'status': ['preview'], 'supported_by': 'community', 'metadata_version': '1.1'} DOCUMENTATION = ''' --- module: fortios_firewall_address short_description: Configure IPv4 addresses. description: - This module is able to configure a FortiGate or FortiOS by allowing the user to configure firewall feature and address category. Examples include all options and need to be adjusted to datasources before usage. Tested with FOS v6.0.2 version_added: "2.8" author: - Miguel Angel Munoz (@mamunozgonzalez) - Nicolas Thomas (@thomnico) notes: - Requires fortiosapi library developed by Fortinet - Run as a local_action in your playbook requirements: - fortiosapi>=0.9.8 options: host: description: - FortiOS or FortiGate IP address. required: true username: description: - FortiOS or FortiGate username. required: true password: description: - FortiOS or FortiGate password. default: "" vdom: description: - Virtual domain, among those defined previously. A vdom is a virtual instance of the FortiGate that can be configured and used as a different unit. default: root https: description: - Indicates if the requests towards FortiGate must use HTTPS protocol type: bool default: false firewall_address: description: - Configure IPv4 addresses. default: null suboptions: state: description: - Indicates whether to create or remove the object choices: - present - absent allow-routing: description: - Enable/disable use of this address in the static route configuration. choices: - enable - disable associated-interface: description: - Network interface associated with address. Source system.interface.name system.zone.name. cache-ttl: description: - Defines the minimal TTL of individual IP addresses in FQDN cache measured in seconds. color: description: - Color of icon on the GUI. comment: description: - Comment. country: description: - IP addresses associated to a specific country. end-ip: description: - Final IP address (inclusive) in the range for the address. epg-name: description: - Endpoint group name. filter: description: - Match criteria filter. fqdn: description: - Fully Qualified Domain Name address. list: description: - IP address list. suboptions: ip: description: - IP. required: true name: description: - Address name. required: true obj-id: description: - Object ID for NSX. organization: description: - "Organization domain name (Syntax: organization/domain)." policy-group: description: - Policy group name. sdn: description: - SDN. choices: - aci - aws - azure - gcp - nsx - nuage - oci sdn-tag: description: - SDN Tag. start-ip: description: - First IP address (inclusive) in the range for the address.
subnet: description: - IP address and subnet mask of address. subnet-name: description: - Subnet name. tagging: description: - Config object tagging. suboptions: category: description: - Tag category. Source system.object-tagging.category. name: description: - Tagging entry name. required: true tags: description: - Tags. suboptions: name: description: - Tag name. Source system.object-tagging.tags.name. required: true tenant: description: - Tenant. type: description: - Type of address. choices: - ipmask - iprange - fqdn - geography - wildcard - wildcard-fqdn - dynamic uuid: description: - Universally Unique Identifier (UUID; automatically assigned but can be manually reset). visibility: description: - Enable/disable address visibility in the GUI. choices: - enable - disable wildcard: description: - IP address and wildcard netmask. wildcard-fqdn: description: - Fully Qualified Domain Name with wildcard characters. ''' EXAMPLES = ''' - hosts: localhost vars: host: "192.168.122.40" username: "admin" password: "" vdom: "root" tasks: - name: Configure IPv4 addresses. fortios_firewall_address: host: "{{ host }}" username: "{{ username }}" password: "{{ password }}" vdom: "{{ vdom }}" firewall_address: state: "present" allow-routing: "enable" associated-interface: "<your_own_value> (source system.interface.name system.zone.name)" cache-ttl: "5" color: "6" comment: "Comment." country: "<your_own_value>" end-ip: "<your_own_value>" epg-name: "<your_own_value>" filter: "<your_own_value>" fqdn: "<your_own_value>" list: - ip: "<your_own_value>" name: "default_name_15" obj-id: "<your_own_value>" organization: "<your_own_value>" policy-group: "<your_own_value>" sdn: "aci" sdn-tag: "<your_own_value>" start-ip: "<your_own_value>" subnet: "<your_own_value>" subnet-name: "<your_own_value>" tagging: - category: "<your_own_value> (source system.object-tagging.category)" name: "default_name_26" tags: - name: "default_name_28 (source system.object-tagging.tags.name)" tenant: "<your_own_value>" type: "ipmask" uuid: "<your_own_value>" visibility: "enable" wildcard: "<your_own_value>" wildcard-fqdn: "<your_own_value>" ''' RETURN = ''' build: description: Build number of the fortigate image returned: always type: str sample: '1547' http_method: description: Last method used to provision the content into FortiGate returned: always type: str sample: 'PUT' http_status: description: Last result given by FortiGate on last operation applied returned: always type: str sample: "200" mkey: description: Master key (id) used in the last call to FortiGate returned: success type: str sample: "key1" name: description: Name of the table used to fulfill the request returned: always type: str sample: "urlfilter" path: description: Path of the table used to fulfill the request returned: always type: str sample: "webfilter" revision: description: Internal revision number returned: always type: str sample: "17.0.2.10658" serial: description: Serial number of the unit returned: always type: str sample: "FGVMEVYYQT3AB5352" status: description: Indication of the operation's result returned: always type: str sample: "success" vdom: description: Virtual domain used returned: always type: str sample: "root" version: description: Version of the FortiGate returned: always type: str sample: "v5.6.3" ''' from ansible.module_utils.basic import AnsibleModule fos = None def login(data): host = data['host'] username = data['username'] password = data['password'] fos.debug('on') if 'https' in data and not data['https']: fos.https('off') else: 
fos.https('on') fos.login(host, username, password) def filter_firewall_address_data(json): option_list = ['allow-routing', 'associated-interface', 'cache-ttl', 'color', 'comment', 'country', 'end-ip', 'epg-name', 'filter', 'fqdn', 'list', 'name', 'obj-id', 'organization', 'policy-group', 'sdn', 'sdn-tag', 'start-ip', 'subnet', 'subnet-name', 'tagging', 'tenant', 'type', 'uuid', 'visibility', 'wildcard', 'wildcard-fqdn'] dictionary = {} for attribute in option_list: if attribute in json: dictionary[attribute] = json[attribute] return dictionary def firewall_address(data, fos): vdom = data['vdom'] firewall_address_data = data['firewall_address'] filtered_data = filter_firewall_address_data(firewall_address_data) if firewall_address_data['state'] == "present": return fos.set('firewall', 'address', data=filtered_data, vdom=vdom) elif firewall_address_data['state'] == "absent": return fos.delete('firewall', 'address', mkey=filtered_data['name'], vdom=vdom) def fortios_firewall(data, fos): login(data) methodlist = ['firewall_address'] for method in methodlist: if data[method]: resp = eval(method)(data, fos) break fos.logout() return not resp['status'] == "success", resp['status'] == "success", resp def main(): fields = { "host": {"required": True, "type": "str"}, "username": {"required": True, "type": "str"}, "password": {"required": False, "type": "str", "no_log": True}, "vdom": {"required": False, "type": "str", "default": "root"}, "https": {"required": False, "type": "bool", "default": "False"}, "firewall_address": { "required": False, "type": "dict", "options": { "state": {"required": True, "type": "str", "choices": ["present", "absent"]}, "allow-routing": {"required": False, "type": "str", "choices": ["enable", "disable"]}, "associated-interface": {"required": False, "type": "str"}, "cache-ttl": {"required": False, "type": "int"}, "color": {"required": False, "type": "int"}, "comment": {"required": False, "type": "str"}, "country": {"required": False, "type": "str"}, "end-ip": {"required": False, "type": "str"}, "epg-name": {"required": False, "type": "str"}, "filter": {"required": False, "type": "str"}, "fqdn": {"required": False, "type": "str"}, "list": {"required": False, "type": "list", "options": { "ip": {"required": True, "type": "str"} }}, "name": {"required": True, "type": "str"}, "obj-id": {"required": False, "type": "str"}, "organization": {"required": False, "type": "str"}, "policy-group": {"required": False, "type": "str"}, "sdn": {"required": False, "type": "str", "choices": ["aci", "aws", "azure", "gcp", "nsx", "nuage", "oci"]}, "sdn-tag": {"required": False, "type": "str"}, "start-ip": {"required": False, "type": "str"}, "subnet": {"required": False, "type": "str"}, "subnet-name": {"required": False, "type": "str"}, "tagging": {"required": False, "type": "list", "options": { "category": {"required": False, "type": "str"}, "name": {"required": True, "type": "str"}, "tags": {"required": False, "type": "list", "options": { "name": {"required": True, "type": "str"} }} }}, "tenant": {"required": False, "type": "str"}, "type": {"required": False, "type": "str", "choices": ["ipmask", "iprange", "fqdn", "geography", "wildcard", "wildcard-fqdn", "dynamic"]}, "uuid": {"required": False, "type": "str"}, "visibility": {"required": False, "type": "str", "choices": ["enable", "disable"]}, "wildcard": {"required": False, "type": "str"}, "wildcard-fqdn": {"required": False, "type": "str"} } } } module = AnsibleModule(argument_spec=fields, supports_check_mode=False) try: from fortiosapi 
import FortiOSAPI except ImportError: module.fail_json(msg="fortiosapi module is required") global fos fos = FortiOSAPI() is_error, has_changed, result = fortios_firewall(module.params, fos) if not is_error: module.exit_json(changed=has_changed, meta=result) else: module.fail_json(msg="Error in repo", meta=result) if __name__ == '__main__': main()
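A behaviour sketch for filter_firewall_address_data above (the values are made up): keys outside option_list, including state and anything unknown, are dropped before the payload is sent to the FortiGate; state is read separately by firewall_address.
data = {'state': 'present', 'name': 'lan-net',
        'subnet': '192.168.1.0 255.255.255.0', 'unknown-key': 'ignored'}
print(filter_firewall_address_data(data))
# -> {'name': 'lan-net', 'subnet': '192.168.1.0 255.255.255.0'} (key order may vary)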
gpl-3.0
flyher/pymo
android/pgs4a-0.9.6/python-install/lib/python2.7/distutils/tests/setuptools_build_ext.py
149
11489
from distutils.command.build_ext import build_ext as _du_build_ext try: # Attempt to use Pyrex for building extensions, if available from Pyrex.Distutils.build_ext import build_ext as _build_ext except ImportError: _build_ext = _du_build_ext import os, sys from distutils.file_util import copy_file from distutils.tests.setuptools_extension import Library from distutils.ccompiler import new_compiler from distutils.sysconfig import customize_compiler, get_config_var get_config_var("LDSHARED") # make sure _config_vars is initialized from distutils.sysconfig import _config_vars from distutils import log from distutils.errors import * have_rtld = False use_stubs = False libtype = 'shared' if sys.platform == "darwin": use_stubs = True elif os.name != 'nt': try: from dl import RTLD_NOW have_rtld = True use_stubs = True except ImportError: pass def if_dl(s): if have_rtld: return s return '' class build_ext(_build_ext): def run(self): """Build extensions in build directory, then copy if --inplace""" old_inplace, self.inplace = self.inplace, 0 _build_ext.run(self) self.inplace = old_inplace if old_inplace: self.copy_extensions_to_source() def copy_extensions_to_source(self): build_py = self.get_finalized_command('build_py') for ext in self.extensions: fullname = self.get_ext_fullname(ext.name) filename = self.get_ext_filename(fullname) modpath = fullname.split('.') package = '.'.join(modpath[:-1]) package_dir = build_py.get_package_dir(package) dest_filename = os.path.join(package_dir,os.path.basename(filename)) src_filename = os.path.join(self.build_lib,filename) # Always copy, even if source is older than destination, to ensure # that the right extensions for the current Python/platform are # used. copy_file( src_filename, dest_filename, verbose=self.verbose, dry_run=self.dry_run ) if ext._needs_stub: self.write_stub(package_dir or os.curdir, ext, True) if _build_ext is not _du_build_ext and not hasattr(_build_ext,'pyrex_sources'): # Workaround for problems using some Pyrex versions w/SWIG and/or 2.4 def swig_sources(self, sources, *otherargs): # first do any Pyrex processing sources = _build_ext.swig_sources(self, sources) or sources # Then do any actual SWIG stuff on the remainder return _du_build_ext.swig_sources(self, sources, *otherargs) def get_ext_filename(self, fullname): filename = _build_ext.get_ext_filename(self,fullname) ext = self.ext_map[fullname] if isinstance(ext,Library): fn, ext = os.path.splitext(filename) return self.shlib_compiler.library_filename(fn,libtype) elif use_stubs and ext._links_to_dynamic: d,fn = os.path.split(filename) return os.path.join(d,'dl-'+fn) else: return filename def initialize_options(self): _build_ext.initialize_options(self) self.shlib_compiler = None self.shlibs = [] self.ext_map = {} def finalize_options(self): _build_ext.finalize_options(self) self.extensions = self.extensions or [] self.check_extensions_list(self.extensions) self.shlibs = [ext for ext in self.extensions if isinstance(ext,Library)] if self.shlibs: self.setup_shlib_compiler() for ext in self.extensions: ext._full_name = self.get_ext_fullname(ext.name) for ext in self.extensions: fullname = ext._full_name self.ext_map[fullname] = ext ltd = ext._links_to_dynamic = \ self.shlibs and self.links_to_dynamic(ext) or False ext._needs_stub = ltd and use_stubs and not isinstance(ext,Library) filename = ext._file_name = self.get_ext_filename(fullname) libdir = os.path.dirname(os.path.join(self.build_lib,filename)) if ltd and libdir not in ext.library_dirs: ext.library_dirs.append(libdir) if ltd 
and use_stubs and os.curdir not in ext.runtime_library_dirs: ext.runtime_library_dirs.append(os.curdir) def setup_shlib_compiler(self): compiler = self.shlib_compiler = new_compiler( compiler=self.compiler, dry_run=self.dry_run, force=self.force ) if sys.platform == "darwin": tmp = _config_vars.copy() try: # XXX Help! I don't have any idea whether these are right... _config_vars['LDSHARED'] = "gcc -Wl,-x -dynamiclib -undefined dynamic_lookup" _config_vars['CCSHARED'] = " -dynamiclib" _config_vars['SO'] = ".dylib" customize_compiler(compiler) finally: _config_vars.clear() _config_vars.update(tmp) else: customize_compiler(compiler) if self.include_dirs is not None: compiler.set_include_dirs(self.include_dirs) if self.define is not None: # 'define' option is a list of (name,value) tuples for (name,value) in self.define: compiler.define_macro(name, value) if self.undef is not None: for macro in self.undef: compiler.undefine_macro(macro) if self.libraries is not None: compiler.set_libraries(self.libraries) if self.library_dirs is not None: compiler.set_library_dirs(self.library_dirs) if self.rpath is not None: compiler.set_runtime_library_dirs(self.rpath) if self.link_objects is not None: compiler.set_link_objects(self.link_objects) # hack so distutils' build_extension() builds a library instead compiler.link_shared_object = link_shared_object.__get__(compiler) def get_export_symbols(self, ext): if isinstance(ext,Library): return ext.export_symbols return _build_ext.get_export_symbols(self,ext) def build_extension(self, ext): _compiler = self.compiler try: if isinstance(ext,Library): self.compiler = self.shlib_compiler _build_ext.build_extension(self,ext) if ext._needs_stub: self.write_stub( self.get_finalized_command('build_py').build_lib, ext ) finally: self.compiler = _compiler def links_to_dynamic(self, ext): """Return true if 'ext' links to a dynamic lib in the same package""" # XXX this should check to ensure the lib is actually being built # XXX as dynamic, and not just using a locally-found version or a # XXX static-compiled version libnames = dict.fromkeys([lib._full_name for lib in self.shlibs]) pkg = '.'.join(ext._full_name.split('.')[:-1]+['']) for libname in ext.libraries: if pkg+libname in libnames: return True return False def get_outputs(self): outputs = _build_ext.get_outputs(self) optimize = self.get_finalized_command('build_py').optimize for ext in self.extensions: if ext._needs_stub: base = os.path.join(self.build_lib, *ext._full_name.split('.')) outputs.append(base+'.py') outputs.append(base+'.pyc') if optimize: outputs.append(base+'.pyo') return outputs def write_stub(self, output_dir, ext, compile=False): log.info("writing stub loader for %s to %s",ext._full_name, output_dir) stub_file = os.path.join(output_dir, *ext._full_name.split('.'))+'.py' if compile and os.path.exists(stub_file): raise DistutilsError(stub_file+" already exists! 
Please delete.") if not self.dry_run: f = open(stub_file,'w') f.write('\n'.join([ "def __bootstrap__():", " global __bootstrap__, __file__, __loader__", " import sys, os, pkg_resources, imp"+if_dl(", dl"), " __file__ = pkg_resources.resource_filename(__name__,%r)" % os.path.basename(ext._file_name), " del __bootstrap__", " if '__loader__' in globals():", " del __loader__", if_dl(" old_flags = sys.getdlopenflags()"), " old_dir = os.getcwd()", " try:", " os.chdir(os.path.dirname(__file__))", if_dl(" sys.setdlopenflags(dl.RTLD_NOW)"), " imp.load_dynamic(__name__,__file__)", " finally:", if_dl(" sys.setdlopenflags(old_flags)"), " os.chdir(old_dir)", "__bootstrap__()", "" # terminal \n ])) f.close() if compile: from distutils.util import byte_compile byte_compile([stub_file], optimize=0, force=True, dry_run=self.dry_run) optimize = self.get_finalized_command('install_lib').optimize if optimize > 0: byte_compile([stub_file], optimize=optimize, force=True, dry_run=self.dry_run) if os.path.exists(stub_file) and not self.dry_run: os.unlink(stub_file) if use_stubs or os.name=='nt': # Build shared libraries # def link_shared_object(self, objects, output_libname, output_dir=None, libraries=None, library_dirs=None, runtime_library_dirs=None, export_symbols=None, debug=0, extra_preargs=None, extra_postargs=None, build_temp=None, target_lang=None ): self.link( self.SHARED_LIBRARY, objects, output_libname, output_dir, libraries, library_dirs, runtime_library_dirs, export_symbols, debug, extra_preargs, extra_postargs, build_temp, target_lang ) else: # Build static libraries everywhere else libtype = 'static' def link_shared_object(self, objects, output_libname, output_dir=None, libraries=None, library_dirs=None, runtime_library_dirs=None, export_symbols=None, debug=0, extra_preargs=None, extra_postargs=None, build_temp=None, target_lang=None ): # XXX we need to either disallow these attrs on Library instances, # or warn/abort here if set, or something... #libraries=None, library_dirs=None, runtime_library_dirs=None, #export_symbols=None, extra_preargs=None, extra_postargs=None, #build_temp=None assert output_dir is None # distutils build_ext doesn't pass this output_dir,filename = os.path.split(output_libname) basename, ext = os.path.splitext(filename) if self.library_filename("x").startswith('lib'): # strip 'lib' prefix; this is kludgy if some platform uses # a different prefix basename = basename[3:] self.create_static_lib( objects, basename, output_dir, debug, target_lang )
mit
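A note on write_stub() in the build_ext entry above: the stub it writes is easier to read as a whole file than as a '\n'.join of template strings. Below is a sketch of what that template would emit for a hypothetical extension pkg/_fast.so on a platform without dl/RTLD_NOW, where every if_dl(...) line collapses to an empty string; the module and file names are illustrative, not from the source.

# pkg/_fast.py -- illustrative stub produced by write_stub() for a hypothetical pkg/_fast.so
def __bootstrap__():
    global __bootstrap__, __file__, __loader__
    import sys, os, pkg_resources, imp
    # locate the real binary next to this stub
    __file__ = pkg_resources.resource_filename(__name__, '_fast.so')
    del __bootstrap__
    if '__loader__' in globals():
        del __loader__
    old_dir = os.getcwd()
    try:
        # load the dynamic extension under this module's own name
        os.chdir(os.path.dirname(__file__))
        imp.load_dynamic(__name__, __file__)
    finally:
        os.chdir(old_dir)
__bootstrap__()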
w1ll1am23/home-assistant
homeassistant/components/yeelightsunflower/light.py
21
3638
"""Support for Yeelight Sunflower color bulbs (not Yeelight Blue or WiFi).""" import logging import voluptuous as vol import yeelightsunflower from homeassistant.components.light import ( ATTR_BRIGHTNESS, ATTR_HS_COLOR, PLATFORM_SCHEMA, SUPPORT_BRIGHTNESS, SUPPORT_COLOR, LightEntity, ) from homeassistant.const import CONF_HOST import homeassistant.helpers.config_validation as cv import homeassistant.util.color as color_util _LOGGER = logging.getLogger(__name__) SUPPORT_YEELIGHT_SUNFLOWER = SUPPORT_BRIGHTNESS | SUPPORT_COLOR PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({vol.Required(CONF_HOST): cv.string}) def setup_platform(hass, config, add_entities, discovery_info=None): """Set up the Yeelight Sunflower Light platform.""" host = config.get(CONF_HOST) hub = yeelightsunflower.Hub(host) if not hub.available: _LOGGER.error("Could not connect to Yeelight Sunflower hub") return False add_entities(SunflowerBulb(light) for light in hub.get_lights()) class SunflowerBulb(LightEntity): """Representation of a Yeelight Sunflower Light.""" def __init__(self, light): """Initialize a Yeelight Sunflower bulb.""" self._light = light self._available = light.available self._brightness = light.brightness self._is_on = light.is_on self._rgb_color = light.rgb_color self._unique_id = light.zid @property def name(self): """Return the display name of this light.""" return f"sunflower_{self._light.zid}" @property def unique_id(self): """Return the unique ID of this light.""" return self._unique_id @property def available(self): """Return True if entity is available.""" return self._available @property def is_on(self): """Return true if light is on.""" return self._is_on @property def brightness(self): """Return the brightness is 0-255; Yeelight's brightness is 0-100.""" return int(self._brightness / 100 * 255) @property def hs_color(self): """Return the color property.""" return color_util.color_RGB_to_hs(*self._rgb_color) @property def supported_features(self): """Flag supported features.""" return SUPPORT_YEELIGHT_SUNFLOWER def turn_on(self, **kwargs): """Instruct the light to turn on, optionally set colour/brightness.""" # when no arguments, just turn light on (full brightness) if not kwargs: self._light.turn_on() else: if ATTR_HS_COLOR in kwargs and ATTR_BRIGHTNESS in kwargs: rgb = color_util.color_hs_to_RGB(*kwargs[ATTR_HS_COLOR]) bright = int(kwargs[ATTR_BRIGHTNESS] / 255 * 100) self._light.set_all(rgb[0], rgb[1], rgb[2], bright) elif ATTR_HS_COLOR in kwargs: rgb = color_util.color_hs_to_RGB(*kwargs[ATTR_HS_COLOR]) self._light.set_rgb_color(rgb[0], rgb[1], rgb[2]) elif ATTR_BRIGHTNESS in kwargs: bright = int(kwargs[ATTR_BRIGHTNESS] / 255 * 100) self._light.set_brightness(bright) def turn_off(self, **kwargs): """Instruct the light to turn off.""" self._light.turn_off() def update(self): """Fetch new state data for this light and update local values.""" self._light.update() self._available = self._light.available self._brightness = self._light.brightness self._is_on = self._light.is_on self._rgb_color = self._light.rgb_color
apache-2.0
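The component above converts between Home Assistant's 0-255 brightness scale and the Sunflower hub's 0-100 scale in both directions (in the brightness property and in turn_on). A minimal round-trip sketch of that arithmetic, with illustrative values:

def ha_to_hub(brightness_255):
    # Home Assistant (0-255) -> Yeelight Sunflower hub (0-100), as in turn_on()
    return int(brightness_255 / 255 * 100)

def hub_to_ha(brightness_100):
    # Yeelight Sunflower hub (0-100) -> Home Assistant (0-255), as in the brightness property
    return int(brightness_100 / 100 * 255)

assert ha_to_hub(255) == 100
assert hub_to_ha(50) == 127  # int() truncates 127.5, mirroring the component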
syllog1sm/TextBlob
text/nltk/tag/tnt.py
2
18395
# Natural Language Toolkit: TnT Tagger # # Copyright (C) 2001-2013 NLTK Project # Author: Sam Huston <[email protected]> # # URL: <http://www.nltk.org/> # For license information, see LICENSE.TXT ''' Implementation of 'TnT - A Statistical Part of Speech Tagger' by Thorsten Brants http://acl.ldc.upenn.edu/A/A00/A00-1031.pdf ''' from __future__ import print_function from math import log from operator import itemgetter from nltk.probability import FreqDist, ConditionalFreqDist from nltk.tag.api import TaggerI class TnT(TaggerI): ''' TnT - Statistical POS tagger IMPORTANT NOTES: * DOES NOT AUTOMATICALLY DEAL WITH UNSEEN WORDS - It is possible to provide an untrained POS tagger to create tags for unknown words, see __init__ function * SHOULD BE USED WITH SENTENCE-DELIMITED INPUT - Due to the nature of this tagger, it works best when trained over sentence delimited input. - However it still produces good results if the training data and testing data are separated on all punctuation eg: [,.?!] - Input for training is expected to be a list of sentences where each sentence is a list of (word, tag) tuples - Input for tag function is a single sentence Input for tagdata function is a list of sentences Output is of a similar form * Function provided to process text that is unsegmented - Please see basic_sent_chop() TnT uses a second order Markov model to produce tags for a sequence of input, specifically: argmax [Proj(P(t_i|t_i-1,t_i-2)P(w_i|t_i))] P(t_T+1 | t_T) i.e. the maximum projection of a set of probabilities The set of possible tags for a given word is derived from the training data. It is the set of all tags that the exact word has been assigned. To speed up and get more precision, we can use log addition instead of multiplication, specifically: argmax [Sigma(log(P(t_i|t_i-1,t_i-2))+log(P(w_i|t_i)))] + log(P(t_T+1|t_T)) The probability of a tag for a given word is the linear interpolation of 3 Markov models; a zero-order, first-order, and a second order model. P(t_i| t_i-1, t_i-2) = l1*P(t_i) + l2*P(t_i| t_i-1) + l3*P(t_i| t_i-1, t_i-2) A beam search is used to limit the memory usage of the algorithm. The degree of the beam can be changed using N in the initialization. N represents the maximum number of possible solutions to maintain while tagging. It is possible to differentiate the tags which are assigned to capitalized words. However this does not result in a significant gain in the accuracy of the results. ''' def __init__(self, unk=None, Trained=False, N=1000, C=False): ''' Construct a TnT statistical tagger. Tagger must be trained before being used to tag input. :param unk: instance of a POS tagger, conforms to TaggerI :type unk:(TaggerI) :param Trained: Indication that the POS tagger is trained or not :type Trained: boolean :param N: Beam search degree (see above) :type N:(int) :param C: Capitalization flag :type C: boolean Initializer, creates frequency distributions to be used for tagging _lx values represent the portion of the tri/bi/uni taggers to be used to calculate the probability N value is the number of possible solutions to maintain while tagging. A good value for this is 1000 C is a boolean value which specifies to use or not use the Capitalization of the word as additional information for tagging.
NOTE: using capitalization may not increase the accuracy of the tagger ''' self._uni = FreqDist() self._bi = ConditionalFreqDist() self._tri = ConditionalFreqDist() self._wd = ConditionalFreqDist() self._eos = ConditionalFreqDist() self._l1 = 0.0 self._l2 = 0.0 self._l3 = 0.0 self._N = N self._C = C self._T = Trained self._unk = unk # statistical tools (ignore or delete me) self.unknown = 0 self.known = 0 def train(self, data): ''' Uses a set of tagged data to train the tagger. If an unknown word tagger is specified, it is trained on the same data. :param data: List of lists of (word, tag) tuples :type data: tuple(str) ''' # Ensure that local C flag is initialized before use C = False if self._unk is not None and self._T == False: self._unk.train(data) for sent in data: history = [('BOS',False), ('BOS',False)] for w, t in sent: # if capitalization is requested, # and the word begins with a capital # set local flag C to True if self._C and w[0].isupper(): C=True self._wd[w].inc(t) self._uni.inc((t,C)) self._bi[history[1]].inc((t,C)) self._tri[tuple(history)].inc((t,C)) history.append((t,C)) history.pop(0) # set local flag C to false for the next word C = False self._eos[t].inc('EOS') # compute lambda values from the trained frequency distributions self._compute_lambda() #(debugging -- ignore or delete me) #print "lambdas" #print i, self._l1, i, self._l2, i, self._l3 def _compute_lambda(self): ''' creates lambda values based upon training data NOTE: no need to explicitly reference C, it is contained within the tag variable :: tag == (tag,C) for each tag trigram (t1, t2, t3) depending on the maximum value of - f(t1,t2,t3)-1 / f(t1,t2)-1 - f(t2,t3)-1 / f(t2)-1 - f(t3)-1 / N-1 increment l3,l2, or l1 by f(t1,t2,t3) ISSUES -- Resolutions: if 2 values are equal, increment both lambda values by (f(t1,t2,t3) / 2) ''' # temporary lambda variables tl1 = 0.0 tl2 = 0.0 tl3 = 0.0 # for each t1,t2 in system for history in self._tri.conditions(): (h1, h2) = history # for each t3 given t1,t2 in system # (NOTE: tag actually represents (tag,C)) # However no effect within this function for tag in self._tri[history].samples(): # if there has only been 1 occurrence of this tag in the data # then ignore this trigram. 
if self._uni[tag] == 1: continue # safe_div provides a safe floating point division # it returns -1 if the denominator is 0 c3 = self._safe_div((self._tri[history][tag]-1), (self._tri[history].N()-1)) c2 = self._safe_div((self._bi[h2][tag]-1), (self._bi[h2].N()-1)) c1 = self._safe_div((self._uni[tag]-1), (self._uni.N()-1)) # if c1 is the maximum value: if (c1 > c3) and (c1 > c2): tl1 += self._tri[history][tag] # if c2 is the maximum value elif (c2 > c3) and (c2 > c1): tl2 += self._tri[history][tag] # if c3 is the maximum value elif (c3 > c2) and (c3 > c1): tl3 += self._tri[history][tag] # if c3, and c2 are equal and larger than c1 elif (c3 == c2) and (c3 > c1): tl2 += float(self._tri[history][tag]) /2.0 tl3 += float(self._tri[history][tag]) /2.0 # if c1, and c2 are equal and larger than c3 # this might be a dumb thing to do....(not sure yet) elif (c2 == c1) and (c1 > c3): tl1 += float(self._tri[history][tag]) /2.0 tl2 += float(self._tri[history][tag]) /2.0 # otherwise there might be a problem # eg: all values = 0 else: #print "Problem", c1, c2 ,c3 pass # Lambda normalisation: # ensures that l1+l2+l3 = 1 self._l1 = tl1 / (tl1+tl2+tl3) self._l2 = tl2 / (tl1+tl2+tl3) self._l3 = tl3 / (tl1+tl2+tl3) def _safe_div(self, v1, v2): ''' Safe floating point division function, does not allow division by 0 returns -1 if the denominator is 0 ''' if v2 == 0: return -1 else: return float(v1) / float(v2) def tagdata(self, data): ''' Tags each sentence in a list of sentences :param data:list of list of words :type data: [[string,],] :return: list of list of (word, tag) tuples Invokes tag(sent) function for each sentence compiles the results into a list of tagged sentences each tagged sentence is a list of (word, tag) tuples ''' res = [] for sent in data: res1 = self.tag(sent) res.append(res1) return res def tag(self, data): ''' Tags a single sentence :param data: list of words :type data: [string,] :return: [(word, tag),] Calls recursive function '_tagword' to produce a list of tags Associates the sequence of returned tags with the correct words in the input sequence returns a list of (word, tag) tuples ''' current_state = [(['BOS', 'BOS'], 0.0)] sent = list(data) tags = self._tagword(sent, current_state) res = [] for i in range(len(sent)): # unpack and discard the C flags (t,C) = tags[i+2] res.append((sent[i], t)) return res def _tagword(self, sent, current_states): ''' :param sent : List of words remaining in the sentence :type sent : [word,] :param current_states : List of possible tag combinations for the sentence so far, and the log probability associated with each tag combination :type current_states : [([tag, ], logprob), ] Tags the first word in the sentence and recursively tags the remainder of the sentence Uses formula specified above to calculate the probability of a particular tag ''' # if this word marks the end of the sentence, # return the most probable tag if sent == []: (h, logp) = current_states[0] return h # otherwise there are more words to be tagged word = sent[0] sent = sent[1:] new_states = [] # if the Capitalisation is requested, # initialise the flag for this word C = False if self._C and word[0].isupper(): C=True # if word is known # compute the set of possible tags # and their associated log probabilities if word in self._wd.conditions(): self.known += 1 for (history, curr_sent_logprob) in current_states: logprobs = [] for t in self._wd[word].samples(): p_uni = self._uni.freq((t,C)) p_bi = self._bi[history[-1]].freq((t,C)) p_tri = self._tri[tuple(history[-2:])].freq((t,C)) p_wd =
float(self._wd[word][t])/float(self._uni[(t,C)]) p = self._l1 *p_uni + self._l2 *p_bi + self._l3 *p_tri p2 = log(p, 2) + log(p_wd, 2) logprobs.append(((t,C), p2)) # compute the result of appending each tag to this history for (tag, logprob) in logprobs: new_states.append((history + [tag], curr_sent_logprob + logprob)) # otherwise a new word, set of possible tags is unknown else: self.unknown += 1 # since a set of possible tags, # and the probability of each specific tag # can not be returned from most classifiers: # specify that any unknown words are tagged with certainty p = 1 # if no unknown word tagger has been specified # then use the tag 'Unk' if self._unk is None: tag = ('Unk',C) # otherwise apply the unknown word tagger else : [(_w, t)] = list(self._unk.tag([word])) tag = (t,C) for (history, logprob) in current_states: history.append(tag) new_states = current_states # now have computed a set of possible new_states # sort states by log prob # set is now ordered greatest to least log probability new_states.sort(reverse=True, key=itemgetter(1)) # del everything after N (threshold) # this is the beam search cut if len(new_states) > self._N: new_states = new_states[:self._N] # compute the tags for the rest of the sentence # return the best list of tags for the sentence return self._tagword(sent, new_states) ######################################## # helper function -- basic sentence tokenizer ######################################## def basic_sent_chop(data, raw=True): ''' Basic method for tokenizing input into sentences for this tagger: :param data: list of tokens (words or (word, tag) tuples) :type data: str or tuple(str, str) :param raw: boolean flag marking the input data as a list of words or a list of tagged words :type raw: bool :return: list of sentences sentences are a list of tokens tokens are the same as the input Function takes a list of tokens and separates the tokens into lists where each list represents a sentence fragment This function can separate both tagged and raw sequences into basic sentences. Sentence markers are the set of [,.!?] This is a simple method which enhances the performance of the TnT tagger. Better sentence tokenization will further enhance the results. 
''' new_data = [] curr_sent = [] sent_mark = [',','.','?','!'] if raw: for word in data: if word in sent_mark: curr_sent.append(word) new_data.append(curr_sent) curr_sent = [] else: curr_sent.append(word) else: for (word,tag) in data: if word in sent_mark: curr_sent.append((word,tag)) new_data.append(curr_sent) curr_sent = [] else: curr_sent.append((word,tag)) return new_data def demo(): from nltk.corpus import brown sents = list(brown.tagged_sents()) test = list(brown.sents()) # create and train the tagger tagger = TnT() tagger.train(sents[200:1000]) # tag some data tagged_data = tagger.tagdata(test[100:120]) # print results for j in range(len(tagged_data)): s = tagged_data[j] t = sents[j+100] for i in range(len(s)): print(s[i],'--', t[i]) print() def demo2(): from nltk.corpus import treebank d = list(treebank.tagged_sents()) t = TnT(N=1000, C=False) s = TnT(N=1000, C=True) t.train(d[(11)*100:]) s.train(d[(11)*100:]) for i in range(10): tacc = t.evaluate(d[i*100:((i+1)*100)]) tp_un = float(t.unknown) / float(t.known +t.unknown) tp_kn = float(t.known) / float(t.known + t.unknown) t.unknown = 0 t.known = 0 print('Capitalization off:') print('Accuracy:', tacc) print('Percentage known:', tp_kn) print('Percentage unknown:', tp_un) print('Accuracy over known words:', (tacc / tp_kn)) sacc = s.evaluate(d[i*100:((i+1)*100)]) sp_un = float(s.unknown) / float(s.known +s.unknown) sp_kn = float(s.known) / float(s.known + s.unknown) s.unknown = 0 s.known = 0 print('Capitalization on:') print('Accuracy:', sacc) print('Percentage known:', sp_kn) print('Percentage unknown:', sp_un) print('Accuracy over known words:', (sacc / sp_kn)) def demo3(): from nltk.corpus import treebank, brown d = list(treebank.tagged_sents()) e = list(brown.tagged_sents()) d = d[:1000] e = e[:1000] d10 = int(len(d)*0.1) e10 = int(len(e)*0.1) tknacc = 0 sknacc = 0 tallacc = 0 sallacc = 0 tknown = 0 sknown = 0 for i in range(10): t = TnT(N=1000, C=False) s = TnT(N=1000, C=False) dtest = d[(i*d10):((i+1)*d10)] etest = e[(i*e10):((i+1)*e10)] dtrain = d[:(i*d10)] + d[((i+1)*d10):] etrain = e[:(i*e10)] + e[((i+1)*e10):] t.train(dtrain) s.train(etrain) tacc = t.evaluate(dtest) tp_un = float(t.unknown) / float(t.known +t.unknown) tp_kn = float(t.known) / float(t.known + t.unknown) tknown += tp_kn t.unknown = 0 t.known = 0 sacc = s.evaluate(etest) sp_un = float(s.unknown) / float(s.known + s.unknown) sp_kn = float(s.known) / float(s.known + s.unknown) sknown += sp_kn s.unknown = 0 s.known = 0 tknacc += (tacc / tp_kn) sknacc += (sacc / tp_kn) tallacc += tacc sallacc += sacc #print i+1, (tacc / tp_kn), i+1, (sacc / tp_kn), i+1, tacc, i+1, sacc print("brown: acc over words known:", 10 * tknacc) print(" : overall accuracy:", 10 * tallacc) print(" : words known:", 10 * tknown) print("treebank: acc over words known:", 10 * sknacc) print(" : overall accuracy:", 10 * sallacc) print(" : words known:", 10 * sknown) if __name__ == "__main__": import doctest doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE)
mit
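The TnT entry above scores each candidate tag by linearly interpolating unigram, bigram and trigram estimates, P(t_i|t_i-1,t_i-2) = l1*P(t_i) + l2*P(t_i|t_i-1) + l3*P(t_i|t_i-1,t_i-2), then moves to log space before adding the lexical term. A toy numeric check with made-up probabilities and lambdas (none of these values come from a corpus):

from math import log

l1, l2, l3 = 0.2, 0.3, 0.5               # must sum to 1, as enforced by _compute_lambda()
p_uni, p_bi, p_tri = 0.10, 0.40, 0.70    # hypothetical P(t), P(t|t1), P(t|t1,t2)
p_wd = 0.25                              # hypothetical P(w|t)

p = l1 * p_uni + l2 * p_bi + l3 * p_tri
print(p)                   # 0.49
print(log(p, 2) + log(p_wd, 2))  # the per-word score _tagword() accumulates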
barnone/EigenD
plg_midi/midi_input_plg.py
1
16450
# # Copyright 2009 Eigenlabs Ltd. http://www.eigenlabs.com # # This file is part of EigenD. # # EigenD is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # EigenD is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with EigenD. If not, see <http://www.gnu.org/licenses/>. # import os import picross import piw from pi import atom,bundles,domain,agent,logic,utils,node,action,async,upgrade from . import midi_input_version as version,midi_native class VirtualKey(atom.Atom): def __init__(self): atom.Atom.__init__(self,names='key',protocols='virtual') self.choices=[] def __key(self,*keys): x = ','.join(['cmp([dsc(~(parent)"#1","%(k)d")])' % dict(k=k) for k in keys]) return '[%s]' % x def rpc_resolve(self,arg): (a,o) = logic.parse_clause(arg) print 'resolving virtual',arg,(a,o) if not a and o is None: return self.__key(*range(0,128)) if a==('chosen',) and o is None: return self.__key(*self.choices) if a or o is None: return self.__key() o=int(o) if o<0 or o>127: return self.__key() return self.__key(o) class VirtualCC(atom.Atom): clist = ( ('bank select coarse', 0), ('modulation wheel coarse', 1), ('breath controller coarse', 2), ('foot pedal coarse', 4), ('portamento time coarse', 5), ('data entry coarse', 6), ('volume coarse', 7), ('balance coarse', 8), ('pan position coarse', 10), ('expression coarse', 11), ('effect control 1 coarse', 12), ('effect control 2 coarse', 13), ('general purpose slider 1', 16), ('general purpose slider 2', 17), ('general purpose slider 3', 18), ('general purpose slider 4', 19), ('bank select fine', 32), ('modulation wheel fine', 33), ('breath controller fine', 34), ('foot pedal fine', 36), ('portamento time fine', 37), ('data entry fine', 38), ('volume fine', 39), ('balance fine', 40), ('pan position fine', 42), ('expression fine', 43), ('effect control 1 fine', 44), ('effect control 2 fine', 45), ('hold pedal', 64), ('portamento', 65), ('sustenuto pedal', 66), ('soft pedal', 67), ('legato pedal', 68), ('hold 2 pedal', 69), ('sound variation', 70), ('sound timbre', 71), ('sound release time', 72), ('sound attack time', 73), ('sound brightness', 74), ('sound control 6', 75), ('sound control 7', 76), ('sound control 8', 77), ('sound control 9', 78), ('sound control 10', 79), ('general purpose button 1', 80), ('general purpose button 2', 81), ('general purpose button 3', 82), ('general purpose button 4', 83), ('effects level', 91), ('tremulo level', 92), ('chorus level', 93), ('celeste level', 94), ('phaser level', 95), ('data button increment', 96), ('data button decrement', 97), ('non-registered parameter fine', 98), ('non-registered parameter coarse', 99), ('registered parameter fine', 100), ('registered parameter coarse', 101), ('all sound off', 120), ('all controllers off', 121), ('local keyboard', 122), ('all notes off', 123), ('omni mode off', 124), ('omni mode on', 125), ('mono operation', 126), ('poly operation', 127)) cdict = dict(clist) def __init__(self): atom.Atom.__init__(self,names='continuous controller',protocols='virtual browse') self.__selected=None def rpc_setselected(self,arg): print 'VirtualCC:setselected',arg def 
rpc_activated(self,arg): print 'VirtualCC:activated',arg return logic.render_term(('','')) def rpc_current(self,arg): return '[]' def __key(self,*keys): x = ','.join(['cmp([dsc(~(parent)"#2","%(k)d")])' % dict(k=k) for k in keys]) return '[%s]' % x def rpc_resolve(self,arg): (a,o) = logic.parse_clause(arg) a = (' '.join(a)).lower() print 'midi cc resolving',a,o if a in self.cdict: return self.__key(self.cdict[a]) a2 = a+' coarse' if a2 in self.cdict: return self.__key(self.cdict[a2]) if not a and o is None: return self.__key(*range(0,128)) if a or o is None: return self.__key() o=int(o) if o<0 or o>127: return self.__key() print 'resolved to',self.__key(o) return self.__key(o) def rpc_enumerate(self,a): return logic.render_term((len(self.clist),0)) def rpc_cinfo(self,a): return '[]' def rpc_finfo(self,a): (path,idx) = logic.parse_clause(a) map = tuple([ (str(s),'cc %d: %s' % (s,n),None) for (n,s) in self.clist[idx:] ]) return logic.render_term(map) def rpc_fideal(self,arg): try: (path,cookie) = logic.parse_clause(arg) cookie=int(cookie) except: utils.log_exception() return async.failure('invalid cookie') for name,val in self.clist: if cookie==val: return 'cmp([dsc(~(parent)"#2",%d)])' % val return async.failure('invalid cookie') class VirtualProgramChange(atom.Atom): def __init__(self): atom.Atom.__init__(self,names='program change',protocols='virtual') self.choices=[] def __key(self,*keys): x = ','.join(['cmp([dsc(~(parent)"#8","%(k)d")])' % dict(k=k) for k in keys]) return '[%s]' % x def rpc_resolve(self,arg): (a,o) = logic.parse_clause(arg) print 'resolving virtual',arg,(a,o) if not a and o is None: return self.__key(*range(0,128)) if a==('chosen',) and o is None: return self.__key(*self.choices) if a or o is None: return self.__key() o=int(o) if o<0 or o>127: return self.__key() return self.__key(o) class VirtualTrigger(atom.Atom): def __init__(self): atom.Atom.__init__(self,names='trigger',protocols='virtual') self.choices=[] def __key(self,*keys): x = ','.join(['cmp([dsc(~(parent)"#10","%(k)d")])' % dict(k=k) for k in keys]) return '[%s]' % x def rpc_resolve(self,arg): (a,o) = logic.parse_clause(arg) print 'resolving virtual',arg,(a,o) if not a and o is None: return self.__key(*range(0,128)) if a==('chosen',) and o is None: return self.__key(*self.choices) if a or o is None: return self.__key() o=int(o) if o<0 or o>127: return self.__key() return self.__key(o) class MidiDelegate(midi_native.midi_input): def __init__(self,key_cookie,cc_cookie,pc_cookie,trig_cookie,midi_cookie,notify): midi_native.midi_input.__init__(self,key_cookie,cc_cookie,pc_cookie,trig_cookie,midi_cookie) self.sources = [] self.__notify = notify def source_added(self,id,name): xid = '%x'%id for i,(u,n) in enumerate(self.sources): if u==xid: print 'midi source changed',xid,name self.sources[i] = (xid,name) self.__notify() return print 'midi source added',xid,name self.sources.append((xid,name)) self.__notify() def source_removed(self,id): xid = '%x'%id for i,(u,n) in enumerate(self.sources): if u==xid: print 'midi source removed',xid,n del self.sources[i] self.__notify() return class MidiPort(atom.Atom): def __init__(self, key_cookie,cc_cookie,pc_cookie,trig_cookie,midi_cookie): self.__timestamp = piw.tsd_time() self.__midi = MidiDelegate(key_cookie,cc_cookie,pc_cookie,trig_cookie,midi_cookie,self.__sinks_changed) atom.Atom.__init__(self,domain=domain.String(),names='midi port',policy=atom.default_policy(self.setport),protocols='virtual browse') self.__midi.setport(0) self.__midi.set_destination('') 
self.__selected=None self.__update() self.__index = 1 def set_index(self,index): self.__index = index if self.open(): self.__midi.set_destination('Eigenlabs %d' % self.__index) def server_opened(self): atom.Atom.server_opened(self) self.__midi.set_destination('Eigenlabs %d' % self.__index) self.__midi.run() self.setport(self.get_value()) def close_server(self): atom.Atom.close_server(self) self.__midi.set_destination('') self.__midi.setport(0) self.__midi.stop() def __update(self): if not self.get_value() and len(self.__midi.sources): port = self.__midi.sources[0][0] self.__midi.setport(int(port,16)) self.__timestamp = self.__timestamp+1 self.set_property_string('timestamp',str(self.__timestamp)) def setport(self,port): self.set_value(port) self.__update() if self.open(): print 'set port to',port if port: self.__midi.setport(int(port,16)) else: if len(self.__midi.sources): self.__midi.setport(int(self.__midi.sources[0][0],16)) def __sinks_changed(self): self.setport(self.get_value()) def rpc_displayname(self,arg): return 'MIDI input ports' def rpc_setselected(self,arg): (path,selected)=logic.parse_clause(arg) print 'MidiPort:setselected',selected self.__selected=selected def rpc_activated(self,arg): (path,selected)=logic.parse_clause(arg) print 'MidiPort:activated',selected port=selected self.setport(port) return logic.render_term(('','')) def clear_trim(self): self.__midi.clear_trim() def set_trim(self,cc,min,max,inv): self.__midi.set_trim(cc,min,max,inv) def current(self,cc): return self.__midi.current(cc) def resolve_name(self,name): if name=='selection': # o=self.__selected return self.__ideal(self.__selected) else: try: o = int(name) except: return '[]' if o>0 and o<len(self.__midi.sources)+1: return self.__ideal(self.__midi.sources[o-1][0]) return '[]' def __ideal(self,uid): return '[ideal([~server,midiport],%s)]' % logic.render_term(uid) def rpc_fideal(self,arg): (path,cookie) = logic.parse_clause(arg) for id,n in self.__midi.sources: if id==cookie: return 'ideal([~server,midiport],%s)' % logic.render_term(cookie) return async.failure('invalid cookie') def rpc_current(self,arg): current = self.__midi.getport() if current==0: return '[]' return '[["%x",[]]]' % current def rpc_resolve(self,arg): (a,o) = logic.parse_clause(arg) if a or not o: return '[]' return self.resolve_name(o) def rpc_enumerate(self,a): return logic.render_term((len(self.__midi.sources),0)) def rpc_cinfo(self,a): return '[]' def rpc_finfo(self,a): (dlist,cnum) = logic.parse_clause(a) map = tuple([(uid,dsc,None) for (uid,dsc) in self.__midi.sources[cnum:]]) return logic.render_term(map) class Agent(agent.Agent): def __init__(self, address, ordinal): agent.Agent.__init__(self,names='midi input',signature=version,container=6,ordinal=ordinal) self.domain = piw.clockdomain_ctl() self.domain.set_source(piw.makestring('*',0)) self.set_private(node.Server(value=piw.makestring('[]',0), change=self.__settrim)) self[1] = bundles.Output(1,False,names='key output') self[2] = bundles.Output(1,False,names='continuous controller output') self[8] = bundles.Output(1,False,names='program change output') self[10] = bundles.Output(1,False,names='trigger output') self.key_output = bundles.Splitter(self.domain,self[1]) self.cc_output = bundles.Splitter(self.domain,self[2]) self.programchange_output = bundles.Splitter(self.domain,self[8]) self.trigger_output = bundles.Splitter(self.domain,self[10]) self[6] = bundles.Output(1,False,names='midi output') self[7] = bundles.Output(2,False,names='midi clock output') self.midi_output = 
bundles.Splitter(self.domain,self[6],self[7]) self[3] = VirtualKey() self[4] = VirtualCC() self[9] = VirtualProgramChange() self[11] = VirtualTrigger() self[5] = MidiPort(self.key_output.cookie(),self.cc_output.cookie(),self.programchange_output.cookie(),self.trigger_output.cookie(),self.midi_output.cookie()) self.add_verb2(2,'choose([],None,role(None,[ideal([~server,midiport]),singular]))',self.__chooseport) self.add_verb2(3,'invert([],None,role(None,[cmpdsc(~(s)"#2")]))', self.__invert); self.add_verb2(4,'minimise([],None,role(None,[cmpdsc(~(s)"#2")]),option(to,[numeric]))', self.__setmin); self.add_verb2(5,'maximise([],None,role(None,[cmpdsc(~(s)"#2")]),option(to,[numeric]))', self.__setmax); self.set_ordinal(ordinal) def property_change(self,key,value,delegate): if key == 'ordinal': self[5].set_index(self.get_property_long('ordinal',1)) def __settrim(self,val): if val.is_string(): trim = logic.parse_clause(val.as_string()) self[5].clear_trim() for (cc,min,max,inv) in trim: self[5].set_trim(cc,min,max,inv) print 'trim:',trim self.get_private().set_data(val) def get_trim(self,cc): trim = logic.parse_clause(self.get_private().get_data().as_string()) for (tcc,min,max,inv) in trim: if tcc==cc: return list((tcc,min,max,inv)) return [cc,0,127,False] def set_trim(self,cc,min,max,inv): trim = list(logic.parse_clause(self.get_private().get_data().as_string())) done = False for (i,(tcc,tmin,tmax,tinv)) in enumerate(trim): if tcc==cc: trim[i] = (cc,min,max,inv) done = True if not done: trim.append((cc,min,max,inv)) self[5].set_trim(cc,min,max,inv) trim = logic.render_term(tuple(trim)) self.get_private().set_data(piw.makestring(trim,0)) def __invert(self,subj,arg): cc = int(arg[0].args[0][0].args[1]) print 'invert controller',cc trim = self.get_trim(cc) trim[3] = not trim[3] self.set_trim(*trim) def __setmin(self,subj,arg,val): cc = int(arg[0].args[0][0].args[1]) if val is None: val=self[5].current(cc) else: val=int(action.abstract_string(val)) print 'set controller minimum',cc,val trim = self.get_trim(cc) trim[1] = val if trim[1]<=trim[2]: trim[3]=False else: trim[3]=True a=trim[1] trim[1]=trim[2] trim[2]=a self.set_trim(*trim) def __setmax(self,subj,arg,val): cc = int(arg[0].args[0][0].args[1]) if val is None: val=self[5].current(cc) else: val=int(action.abstract_string(val)) print 'set controller maximum',cc,val trim = self.get_trim(cc) trim[2] = val if trim[1]<=trim[2]: trim[3]=False else: trim[3]=True a=trim[1] trim[1]=trim[2] trim[2]=a self.set_trim(*trim) def rpc_resolve_ideal(self,arg): (type,arg) = action.unmarshal(arg) print 'resolving',arg if type=='midiport': return self[5].resolve_name(' '.join(arg)) return action.marshal(()) def __chooseport(self,subj,arg): print 'choose port',arg print action.arg_objects(arg)[0] (type,thing) = action.crack_ideal(action.arg_objects(arg)[0]) print type,thing self[5].setport(thing) agent.main(Agent,gui=True)
gpl-3.0
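The virtual atoms in the EigenD entry above (VirtualKey, VirtualCC, VirtualProgramChange, VirtualTrigger) all build the same kind of logic term: a list of cmp/dsc clauses, one per candidate number, differing only in the connector ordinal ("#1" for keys, "#2" for CCs, "#8" for program changes, "#10" for triggers). A standalone sketch of that string construction, outside the agent framework:

def key_term(connector, *keys):
    # mirrors the private __key helpers above; connector is e.g. 1, 2, 8 or 10
    x = ','.join('cmp([dsc(~(parent)"#%d","%d")])' % (connector, k) for k in keys)
    return '[%s]' % x

print(key_term(2, 7))
# [cmp([dsc(~(parent)"#2","7")])]  -- CC 7, "volume coarse" in VirtualCC.clist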
patsissons/Flexget
flexget/plugins/output/pushalot.py
4
6342
from __future__ import unicode_literals, division, absolute_import
import logging

from flexget import plugin
from flexget.event import event
from flexget.utils import json
from flexget.utils.template import RenderError
from flexget.config_schema import one_or_more

log = logging.getLogger("pushalot")

pushalot_url = "https://pushalot.com/api/sendmessage"


class OutputPushalot(object):
    """
    Example::

      pushalot:
        token: <string> Authorization token (can also be a list of tokens) - Required
        title: <string> (default: task name -- accepts Jinja2)
        body: <string> (default: "{{series_name}} {{series_id}}" -- accepts Jinja2)
        link: <string> (default: "{{imdb_url}}" -- accepts Jinja2)
        linktitle: <string> (default: (none) -- accepts Jinja2)
        important: <boolean> (default is False)
        silent: <boolean> (default is False)
        image: <string> (default: (none) -- accepts Jinja2)
        source: <string> (default is "FlexGet")
        timetolive: <integer> (no default sent, default is set by Pushalot)

    Configuration parameters are also supported from entries (eg. through set).
    """
    default_body = "{% if series_name is defined %}{{tvdb_series_name|d(series_name)}} " \
                   "{{series_id}} {{tvdb_ep_name|d('')}}{% elif imdb_name is defined %}{{imdb_name}} " \
                   "{{imdb_year}}{% else %}{{title}}{% endif %}"
    schema = {
        'type': 'object',
        'properties': {
            'token': one_or_more({'type': 'string'}),
            'title': {'type': 'string', 'default': "Task {{task}}"},
            'body': {'type': 'string', 'default': default_body},
            'link': {'type': 'string',
                     'default': '{% if imdb_url is defined %}{{imdb_url}}{% endif %}'},
            'linktitle': {'type': 'string', 'default': ''},
            'important': {'type': 'boolean', 'default': False},
            'silent': {'type': 'boolean', 'default': False},
            'image': {'type': 'string', 'default': ''},
            'source': {'type': 'string', 'default': 'FlexGet'},
            'timetolive': {'type': 'integer', 'default': 0},
        },
        'required': ['token'],
        'additionalProperties': False
    }

    # Run last to make sure other outputs are successful before sending notification
    @plugin.priority(0)
    def on_task_output(self, task, config):
        # Support for multiple tokens
        tokens = config["token"]
        if not isinstance(tokens, list):
            tokens = [tokens]

        # Loop through the provided entries
        for entry in task.accepted:

            title = config["title"]
            body = config["body"]
            link = config["link"]
            linktitle = config["linktitle"]
            important = config["important"]
            silent = config["silent"]
            image = config["image"]
            source = config["source"]
            timetolive = config["timetolive"]

            # Attempt to render the title field
            try:
                title = entry.render(title)
            except RenderError as e:
                log.warning("Problem rendering 'title': %s" % e)
                title = "Download started"

            # Attempt to render the body field
            try:
                body = entry.render(body)
            except RenderError as e:
                log.warning("Problem rendering 'body': %s" % e)
                body = entry["title"]

            # Attempt to render the link field
            try:
                link = entry.render(link)
            except RenderError as e:
                log.warning("Problem rendering 'link': %s" % e)
                link = entry.get("imdb_url", "")

            # Attempt to render the linktitle field
            try:
                linktitle = entry.render(linktitle)
            except RenderError as e:
                log.warning("Problem rendering 'linktitle': %s" % e)
                linktitle = ""

            try:
                image = entry.render(image)
            except RenderError as e:
                log.warning("Problem rendering 'image': %s" % e)
                image = ""

            for token in tokens:
                # Build the request
                data = {"AuthorizationToken": token, "title": title, "body": body,
                        "link": link, "linktitle": linktitle, "important": important,
                        "silent": silent, "image": image, "source": source,
                        "timetolive": timetolive}

                # Check for test mode
                if task.options.test:
                    log.info("Test mode. Pushalot notification would be:")
                    log.info("    Title: %s" % title)
                    log.info("    body: %s" % body)
                    log.info("    link: %s" % link)
                    log.info("    link Title: %s" % linktitle)
                    log.info("    token: %s" % token)
                    log.info("    important: %s" % important)
                    log.info("    silent: %s" % silent)
                    log.info("    image: %s" % image)
                    log.info("    source: %s" % source)
                    log.info("    timetolive: %s" % timetolive)
                    # Test mode. Skip remainder.
                    continue

                # Make the request
                response = task.requests.post(pushalot_url, data=data, raise_status=False)

                # Check if it succeeded
                request_status = response.status_code

                # error codes and bodies from Pushalot API
                if request_status == 200:
                    log.debug("Pushalot notification sent")
                elif request_status == 500:
                    log.debug("Pushalot notification failed, Pushalot API having issues")
                    # TODO: Implement retrying. API requests 5 seconds between retries.
                elif request_status >= 400:
                    errors = json.loads(response.content)
                    log.error("Pushalot API error: %s" % errors['Description'])
                else:
                    log.error("Unknown error when sending Pushalot notification")


@event('plugin.register')
def register_plugin():
    plugin.register(OutputPushalot, "pushalot", api_ver=2)
mit
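The default_body template in the pushalot entry above falls through three cases: a series entry renders as name + id + episode name, an imdb-matched movie as name + year, and anything else as the raw title. The cases can be exercised with plain jinja2 rather than FlexGet's entry.render() (which layers extra filters on top); the field values below are made up for illustration:

from jinja2 import Template

default_body = ("{% if series_name is defined %}{{tvdb_series_name|d(series_name)}} "
                "{{series_id}} {{tvdb_ep_name|d('')}}{% elif imdb_name is defined %}"
                "{{imdb_name}} {{imdb_year}}{% else %}{{title}}{% endif %}")
t = Template(default_body)

print(t.render(series_name='Some Show', series_id='S01E02'))
# -> "Some Show S01E02 "  (trailing space from the empty episode name)
print(t.render(imdb_name='Some Film', imdb_year=2010))
# -> "Some Film 2010"
print(t.render(title='some.release.name'))
# -> "some.release.name"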
dirtybit/libcustomperf
scripts/tracing/draw_functrace.py
14676
3560
#!/usr/bin/python

"""
Copyright 2008 (c) Frederic Weisbecker <[email protected]>
Licensed under the terms of the GNU GPL License version 2

This script parses a trace provided by the function tracer in
kernel/trace/trace_functions.c
The resulting trace is processed into a tree to produce a more human
view of the call stack by drawing a textual but hierarchical tree of
calls. Only the functions' names and the call time are provided.

Usage:
    Be sure that you have CONFIG_FUNCTION_TRACER
    # mount -t debugfs nodev /sys/kernel/debug
    # echo function > /sys/kernel/debug/tracing/current_tracer
    $ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func
    Wait a while, but not too long: the script is a bit slow.
    Break the pipe (Ctrl + Z)
    $ scripts/draw_functrace.py < raw_trace_func > draw_functrace
    Then you have your drawn trace in draw_functrace
"""


import sys, re

class CallTree:
    """This class provides a tree representation of the functions
    call stack. If a function has no parent in the kernel (interrupt,
    syscall, kernel thread...) then it is attached to a virtual parent
    called ROOT.
    """
    ROOT = None

    def __init__(self, func, time = None, parent = None):
        self._func = func
        self._time = time
        if parent is None:
            self._parent = CallTree.ROOT
        else:
            self._parent = parent
        self._children = []

    def calls(self, func, calltime):
        """If a function calls another one, call this method to insert it
        into the tree at the appropriate place.
        @return: A reference to the newly created child node.
        """
        child = CallTree(func, calltime, self)
        self._children.append(child)
        return child

    def getParent(self, func):
        """Retrieve the last parent of the current node that has
        the name given by func. If no such parent exists, create
        it as a new child of root.
        @return: A reference to the parent.
        """
        tree = self
        while tree != CallTree.ROOT and tree._func != func:
            tree = tree._parent
        if tree == CallTree.ROOT:
            child = CallTree.ROOT.calls(func, None)
            return child
        return tree

    def __repr__(self):
        return self.__toString("", True)

    def __toString(self, branch, lastChild):
        if self._time is not None:
            s = "%s----%s (%s)\n" % (branch, self._func, self._time)
        else:
            s = "%s----%s\n" % (branch, self._func)

        i = 0
        if lastChild:
            branch = branch[:-1] + " "
        while i < len(self._children):
            if i != len(self._children) - 1:
                s += "%s" % self._children[i].__toString(branch +\
                                " |", False)
            else:
                s += "%s" % self._children[i].__toString(branch +\
                                " |", True)
            i += 1
        return s

class BrokenLineException(Exception):
    """If the last line is not complete because of the pipe breakage,
    we want to stop the processing and ignore this line.
    """
    pass

class CommentLineException(Exception):
    """If the line is a comment (as in the beginning of the trace file),
    just ignore it.
    """
    pass


def parseLine(line):
    line = line.strip()
    if line.startswith("#"):
        raise CommentLineException
    m = re.match("[^]]+?\\] +([0-9.]+): (\\w+) <-(\\w+)", line)
    if m is None:
        raise BrokenLineException
    return (m.group(1), m.group(2), m.group(3))


def main():
    CallTree.ROOT = CallTree("Root (Nowhere)", None, None)
    tree = CallTree.ROOT

    for line in sys.stdin:
        try:
            calltime, callee, caller = parseLine(line)
        except BrokenLineException:
            break
        except CommentLineException:
            continue
        tree = tree.getParent(caller)
        tree = tree.calls(callee, calltime)

    print CallTree.ROOT

if __name__ == "__main__":
    main()
gpl-2.0
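parseLine() above extracts (time, callee, caller) from one line of the function tracer's output with a single regex: skip everything up to the closing bracket of the CPU field, then capture the timestamp, the called function, and the "<-" caller. A quick demonstration on a hypothetical trace line (the task name and values are illustrative):

import re

line = "  bash-16939 [000]  6075.461561: mutex_unlock <-tracing_open_generic"
m = re.match(r"[^]]+?\] +([0-9.]+): (\w+) <-(\w+)", line)
print(m.groups())
# ('6075.461561', 'mutex_unlock', 'tracing_open_generic')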
hosseinmh/Django_learning
djmod/.venv/lib/python3.5/site-packages/setuptools/site-patch.py
356
2307
def __boot():
    import sys
    import os
    PYTHONPATH = os.environ.get('PYTHONPATH')
    if PYTHONPATH is None or (sys.platform == 'win32' and not PYTHONPATH):
        PYTHONPATH = []
    else:
        PYTHONPATH = PYTHONPATH.split(os.pathsep)

    pic = getattr(sys, 'path_importer_cache', {})
    stdpath = sys.path[len(PYTHONPATH):]
    mydir = os.path.dirname(__file__)

    for item in stdpath:
        if item == mydir or not item:
            continue  # skip if current dir. on Windows, or my own directory
        importer = pic.get(item)
        if importer is not None:
            loader = importer.find_module('site')
            if loader is not None:
                # This should actually reload the current module
                loader.load_module('site')
                break
        else:
            try:
                import imp  # Avoid import loop in Python >= 3.3
                stream, path, descr = imp.find_module('site', [item])
            except ImportError:
                continue
            if stream is None:
                continue
            try:
                # This should actually reload the current module
                imp.load_module('site', stream, path, descr)
            finally:
                stream.close()
            break
    else:
        raise ImportError("Couldn't find the real 'site' module")

    known_paths = dict([(makepath(item)[1], 1) for item in sys.path])  # 2.2 comp

    oldpos = getattr(sys, '__egginsert', 0)  # save old insertion position
    sys.__egginsert = 0  # and reset the current one

    for item in PYTHONPATH:
        addsitedir(item)

    sys.__egginsert += oldpos  # restore effective old position

    d, nd = makepath(stdpath[0])
    insert_at = None
    new_path = []

    for item in sys.path:
        p, np = makepath(item)

        if np == nd and insert_at is None:
            # We've hit the first 'system' path entry, so added entries go here
            insert_at = len(new_path)

        if np in known_paths or insert_at is None:
            new_path.append(item)
        else:
            # new path after the insert point, back-insert it
            new_path.insert(insert_at, item)
            insert_at += 1

    sys.path[:] = new_path


if __name__ == 'site':
    __boot()
    del __boot
mit
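The tail of __boot() above rebuilds sys.path so that any entry not already known before addsitedir() ran is hoisted ahead of the first "system" entry. A toy rerun of that loop on plain lists, with makepath() normalization stubbed out and made-up paths:

def reorder(path, known, first_system):
    # mirrors the new_path loop in __boot(); 'known' plays the role of known_paths
    insert_at, new_path = None, []
    for item in path:
        if item == first_system and insert_at is None:
            insert_at = len(new_path)  # first 'system' entry: later new items go here
        if item in known or insert_at is None:
            new_path.append(item)
        else:
            new_path.insert(insert_at, item)  # back-insert the newly added entry
            insert_at += 1
    return new_path

print(reorder(['/app', '/usr/lib/python3', '/plugin-from-pth'],
              known={'/app', '/usr/lib/python3'},
              first_system='/usr/lib/python3'))
# ['/app', '/plugin-from-pth', '/usr/lib/python3']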
eayunstack/horizon
openstack_dashboard/dashboards/identity/projects/workflows.py
6
37160
# Copyright 2012 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Copyright 2012 Nebula, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from django.conf import settings from django.core.urlresolvers import reverse from django.utils.translation import ugettext_lazy as _ from openstack_auth import utils as auth_utils from horizon import exceptions from horizon import forms from horizon import messages from horizon.utils import memoized from horizon import workflows from openstack_dashboard import api from openstack_dashboard.api import base from openstack_dashboard.api import cinder from openstack_dashboard.api import keystone from openstack_dashboard.api import nova from openstack_dashboard.usage import quotas INDEX_URL = "horizon:identity:projects:index" ADD_USER_URL = "horizon:identity:projects:create_user" PROJECT_GROUP_ENABLED = keystone.VERSIONS.active >= 3 PROJECT_USER_MEMBER_SLUG = "update_members" PROJECT_GROUP_MEMBER_SLUG = "update_group_members" COMMON_HORIZONTAL_TEMPLATE = "identity/projects/_common_horizontal_form.html" class ProjectQuotaAction(workflows.Action): ifcb_label = _("Injected File Content (Bytes)") metadata_items = forms.IntegerField(min_value=-1, label=_("Metadata Items")) cores = forms.IntegerField(min_value=-1, label=_("VCPUs")) instances = forms.IntegerField(min_value=-1, label=_("Instances")) injected_files = forms.IntegerField(min_value=-1, label=_("Injected Files")) injected_file_content_bytes = forms.IntegerField(min_value=-1, label=ifcb_label) volumes = forms.IntegerField(min_value=-1, label=_("Volumes")) snapshots = forms.IntegerField(min_value=-1, label=_("Volume Snapshots")) gigabytes = forms.IntegerField( min_value=-1, label=_("Total Size of Volumes and Snapshots (GB)")) ram = forms.IntegerField(min_value=-1, label=_("RAM (MB)")) floating_ips = forms.IntegerField(min_value=-1, label=_("Floating IPs")) fixed_ips = forms.IntegerField(min_value=-1, label=_("Fixed IPs")) security_groups = forms.IntegerField(min_value=-1, label=_("Security Groups")) security_group_rules = forms.IntegerField(min_value=-1, label=_("Security Group Rules")) # Neutron security_group = forms.IntegerField(min_value=-1, label=_("Security Groups")) security_group_rule = forms.IntegerField(min_value=-1, label=_("Security Group Rules")) floatingip = forms.IntegerField(min_value=-1, label=_("Floating IPs")) network = forms.IntegerField(min_value=-1, label=_("Networks")) port = forms.IntegerField(min_value=-1, label=_("Ports")) router = forms.IntegerField(min_value=-1, label=_("Routers")) subnet = forms.IntegerField(min_value=-1, label=_("Subnets")) def __init__(self, request, *args, **kwargs): super(ProjectQuotaAction, self).__init__(request, *args, **kwargs) disabled_quotas = quotas.get_disabled_quotas(request) for field in disabled_quotas: if field in self.fields: self.fields[field].required = False self.fields[field].widget = forms.HiddenInput() class UpdateProjectQuotaAction(ProjectQuotaAction): def 
clean(self): cleaned_data = super(UpdateProjectQuotaAction, self).clean() usages = quotas.tenant_quota_usages( self.request, tenant_id=self.initial['project_id']) # Validate the quota values before updating quotas. bad_values = [] for key, value in cleaned_data.items(): used = usages[key].get('used', 0) if value is not None and value >= 0 and used > value: bad_values.append(_('%(used)s %(key)s used') % {'used': used, 'key': quotas.QUOTA_NAMES.get(key, key)}) if bad_values: value_str = ", ".join(bad_values) msg = (_('Quota value(s) cannot be less than the current usage ' 'value(s): %s.') % value_str) raise forms.ValidationError(msg) return cleaned_data class Meta(object): name = _("Quota") slug = 'update_quotas' help_text = _("Set maximum quotas for the project.") class CreateProjectQuotaAction(ProjectQuotaAction): class Meta(object): name = _("Quota") slug = 'create_quotas' help_text = _("Set maximum quotas for the project.") class UpdateProjectQuota(workflows.Step): action_class = UpdateProjectQuotaAction template_name = COMMON_HORIZONTAL_TEMPLATE depends_on = ("project_id",) contributes = quotas.QUOTA_FIELDS class CreateProjectQuota(workflows.Step): action_class = CreateProjectQuotaAction template_name = COMMON_HORIZONTAL_TEMPLATE depends_on = ("project_id",) contributes = quotas.QUOTA_FIELDS class CreateProjectInfoAction(workflows.Action): # Hide the domain_id and domain_name by default domain_id = forms.CharField(label=_("Domain ID"), required=False, widget=forms.HiddenInput()) domain_name = forms.CharField(label=_("Domain Name"), required=False, widget=forms.HiddenInput()) name = forms.CharField(label=_("Name"), max_length=64) description = forms.CharField(widget=forms.widgets.Textarea( attrs={'rows': 4}), label=_("Description"), required=False) enabled = forms.BooleanField(label=_("Enabled"), required=False, initial=True) def __init__(self, request, *args, **kwargs): super(CreateProjectInfoAction, self).__init__(request, *args, **kwargs) # For keystone V3, display the two fields in read-only if keystone.VERSIONS.active >= 3: readonlyInput = forms.TextInput(attrs={'readonly': 'readonly'}) self.fields["domain_id"].widget = readonlyInput self.fields["domain_name"].widget = readonlyInput class Meta(object): name = _("Project Information") help_text = _("Create a project to organize users.") class CreateProjectInfo(workflows.Step): action_class = CreateProjectInfoAction template_name = COMMON_HORIZONTAL_TEMPLATE contributes = ("domain_id", "domain_name", "project_id", "name", "description", "enabled") class UpdateProjectMembersAction(workflows.MembershipAction): def __init__(self, request, *args, **kwargs): super(UpdateProjectMembersAction, self).__init__(request, *args, **kwargs) err_msg = _('Unable to retrieve user list. 
Please try again later.') # Use the domain_id from the project domain_id = self.initial.get("domain_id", None) project_id = '' if 'project_id' in self.initial: project_id = self.initial['project_id'] # Get the default role try: default_role = api.keystone.get_default_role(self.request) # Default role is necessary to add members to a project if default_role is None: default = getattr(settings, "OPENSTACK_KEYSTONE_DEFAULT_ROLE", None) msg = (_('Could not find default role "%s" in Keystone') % default) raise exceptions.NotFound(msg) except Exception: exceptions.handle(self.request, err_msg, redirect=reverse(INDEX_URL)) default_role_name = self.get_default_role_field_name() self.fields[default_role_name] = forms.CharField(required=False) self.fields[default_role_name].initial = default_role.id # Get list of available users all_users = [] try: all_users = api.keystone.user_list(request, domain=domain_id) except Exception: exceptions.handle(request, err_msg) users_list = [(user.id, user.name) for user in all_users] # Get list of roles role_list = [] try: role_list = api.keystone.role_list(request) except Exception: exceptions.handle(request, err_msg, redirect=reverse(INDEX_URL)) for role in role_list: field_name = self.get_member_field_name(role.id) label = role.name self.fields[field_name] = forms.MultipleChoiceField(required=False, label=label) self.fields[field_name].choices = users_list self.fields[field_name].initial = [] # Figure out users & roles if project_id: try: users_roles = api.keystone.get_project_users_roles(request, project_id) except Exception: exceptions.handle(request, err_msg, redirect=reverse(INDEX_URL)) for user_id in users_roles: roles_ids = users_roles[user_id] for role_id in roles_ids: field_name = self.get_member_field_name(role_id) self.fields[field_name].initial.append(user_id) class Meta(object): name = _("Project Members") slug = PROJECT_USER_MEMBER_SLUG class UpdateProjectMembers(workflows.UpdateMembersStep): action_class = UpdateProjectMembersAction available_list_title = _("All Users") members_list_title = _("Project Members") no_available_text = _("No users found.") no_members_text = _("No users.") def contribute(self, data, context): if data: try: roles = api.keystone.role_list(self.workflow.request) except Exception: exceptions.handle(self.workflow.request, _('Unable to retrieve user list.')) post = self.workflow.request.POST for role in roles: field = self.get_member_field_name(role.id) context[field] = post.getlist(field) return context class UpdateProjectGroupsAction(workflows.MembershipAction): def __init__(self, request, *args, **kwargs): super(UpdateProjectGroupsAction, self).__init__(request, *args, **kwargs) err_msg = _('Unable to retrieve group list. 
Please try again later.') # Use the domain_id from the project domain_id = self.initial.get("domain_id", None) project_id = '' if 'project_id' in self.initial: project_id = self.initial['project_id'] # Get the default role try: default_role = api.keystone.get_default_role(self.request) # Default role is necessary to add members to a project if default_role is None: default = getattr(settings, "OPENSTACK_KEYSTONE_DEFAULT_ROLE", None) msg = (_('Could not find default role "%s" in Keystone') % default) raise exceptions.NotFound(msg) except Exception: exceptions.handle(self.request, err_msg, redirect=reverse(INDEX_URL)) default_role_name = self.get_default_role_field_name() self.fields[default_role_name] = forms.CharField(required=False) self.fields[default_role_name].initial = default_role.id # Get list of available groups all_groups = [] try: all_groups = api.keystone.group_list(request, domain=domain_id) except Exception: exceptions.handle(request, err_msg) groups_list = [(group.id, group.name) for group in all_groups] # Get list of roles role_list = [] try: role_list = api.keystone.role_list(request) except Exception: exceptions.handle(request, err_msg, redirect=reverse(INDEX_URL)) for role in role_list: field_name = self.get_member_field_name(role.id) label = role.name self.fields[field_name] = forms.MultipleChoiceField(required=False, label=label) self.fields[field_name].choices = groups_list self.fields[field_name].initial = [] # Figure out groups & roles if project_id: try: groups_roles = api.keystone.get_project_groups_roles( request, project_id) except Exception: exceptions.handle(request, err_msg, redirect=reverse(INDEX_URL)) for group_id in groups_roles: roles_ids = groups_roles[group_id] for role_id in roles_ids: field_name = self.get_member_field_name(role_id) self.fields[field_name].initial.append(group_id) class Meta(object): name = _("Project Groups") slug = PROJECT_GROUP_MEMBER_SLUG class UpdateProjectGroups(workflows.UpdateMembersStep): action_class = UpdateProjectGroupsAction available_list_title = _("All Groups") members_list_title = _("Project Groups") no_available_text = _("No groups found.") no_members_text = _("No groups.") def contribute(self, data, context): if data: try: roles = api.keystone.role_list(self.workflow.request) except Exception: exceptions.handle(self.workflow.request, _('Unable to retrieve role list.')) post = self.workflow.request.POST for role in roles: field = self.get_member_field_name(role.id) context[field] = post.getlist(field) return context class CommonQuotaWorkflow(workflows.Workflow): def _update_project_quota(self, request, data, project_id): # Update the project quota. 
nova_data = dict( [(key, data[key]) for key in quotas.NOVA_QUOTA_FIELDS]) nova.tenant_quota_update(request, project_id, **nova_data) if base.is_service_enabled(request, 'volume'): cinder_data = dict([(key, data[key]) for key in quotas.CINDER_QUOTA_FIELDS]) cinder.tenant_quota_update(request, project_id, **cinder_data) if api.base.is_service_enabled(request, 'network') and \ api.neutron.is_quotas_extension_supported(request): neutron_data = {} disabled_quotas = quotas.get_disabled_quotas(request) for key in quotas.NEUTRON_QUOTA_FIELDS: if key not in disabled_quotas: neutron_data[key] = data[key] api.neutron.tenant_quota_update(request, project_id, **neutron_data) class CreateProject(CommonQuotaWorkflow): slug = "create_project" name = _("Create Project") finalize_button_name = _("Create Project") success_message = _('Created new project "%s".') failure_message = _('Unable to create project "%s".') success_url = "horizon:identity:projects:index" default_steps = (CreateProjectInfo, UpdateProjectMembers, CreateProjectQuota) def __init__(self, request=None, context_seed=None, entry_point=None, *args, **kwargs): if PROJECT_GROUP_ENABLED: self.default_steps = (CreateProjectInfo, UpdateProjectMembers, UpdateProjectGroups, CreateProjectQuota) super(CreateProject, self).__init__(request=request, context_seed=context_seed, entry_point=entry_point, *args, **kwargs) def format_status_message(self, message): return message % self.context.get('name', 'unknown project') def _create_project(self, request, data): # create the project domain_id = data['domain_id'] try: desc = data['description'] self.object = api.keystone.tenant_create(request, name=data['name'], description=desc, enabled=data['enabled'], domain=domain_id) return self.object except Exception: exceptions.handle(request, ignore=True) return def _update_project_members(self, request, data, project_id): # update project members users_to_add = 0 try: available_roles = api.keystone.role_list(request) member_step = self.get_step(PROJECT_USER_MEMBER_SLUG) # count how many users are to be added for role in available_roles: field_name = member_step.get_member_field_name(role.id) role_list = data[field_name] users_to_add += len(role_list) # add new users to project for role in available_roles: field_name = member_step.get_member_field_name(role.id) role_list = data[field_name] users_added = 0 for user in role_list: api.keystone.add_tenant_user_role(request, project=project_id, user=user, role=role.id) users_added += 1 users_to_add -= users_added except Exception: if PROJECT_GROUP_ENABLED: group_msg = _(", add project groups") else: group_msg = "" exceptions.handle(request, _('Failed to add %(users_to_add)s project ' 'members%(group_msg)s and set project quotas.') % {'users_to_add': users_to_add, 'group_msg': group_msg}) finally: auth_utils.remove_project_cache(request.user.token.id) def _update_project_groups(self, request, data, project_id): # update project groups groups_to_add = 0 try: available_roles = api.keystone.role_list(request) member_step = self.get_step(PROJECT_GROUP_MEMBER_SLUG) # count how many groups are to be added for role in available_roles: field_name = member_step.get_member_field_name(role.id) role_list = data[field_name] groups_to_add += len(role_list) # add new groups to project for role in available_roles: field_name = member_step.get_member_field_name(role.id) role_list = data[field_name] groups_added = 0 for group in role_list: api.keystone.add_group_role(request, role=role.id, group=group, project=project_id) groups_added += 
1 groups_to_add -= groups_added except Exception: exceptions.handle(request, _('Failed to add %s project groups ' 'and update project quotas.') % groups_to_add) def _update_project_quota(self, request, data, project_id): try: super(CreateProject, self)._update_project_quota( request, data, project_id) except Exception: exceptions.handle(request, _('Unable to set project quotas.')) def handle(self, request, data): project = self._create_project(request, data) if not project: return False project_id = project.id self._update_project_members(request, data, project_id) if PROJECT_GROUP_ENABLED: self._update_project_groups(request, data, project_id) self._update_project_quota(request, data, project_id) return True class UpdateProjectInfoAction(CreateProjectInfoAction): enabled = forms.BooleanField(required=False, label=_("Enabled")) def __init__(self, request, initial, *args, **kwargs): super(UpdateProjectInfoAction, self).__init__( request, initial, *args, **kwargs) if initial['project_id'] == request.user.project_id: self.fields['enabled'].widget.attrs['disabled'] = True self.fields['enabled'].help_text = _( 'You cannot disable your current project') def clean(self): cleaned_data = super(UpdateProjectInfoAction, self).clean() # NOTE(tsufiev): in case the current project is being edited, its # 'enabled' field is disabled to prevent changing the field value # which is always `True` for the current project (because the user # logged in it). Since Django treats disabled checkbox as providing # `False` value even if its initial value is `True`, we need to # restore the original `True` value of 'enabled' field here. if self.fields['enabled'].widget.attrs.get('disabled', False): cleaned_data['enabled'] = True return cleaned_data class Meta(object): name = _("Project Information") slug = 'update_info' help_text = _("Edit the project details.") class UpdateProjectInfo(workflows.Step): action_class = UpdateProjectInfoAction template_name = COMMON_HORIZONTAL_TEMPLATE depends_on = ("project_id",) contributes = ("domain_id", "domain_name", "name", "description", "enabled") class UpdateProject(CommonQuotaWorkflow): slug = "update_project" name = _("Edit Project") finalize_button_name = _("Save") success_message = _('Modified project "%s".') failure_message = _('Unable to modify project "%s".') success_url = "horizon:identity:projects:index" default_steps = (UpdateProjectInfo, UpdateProjectMembers, UpdateProjectQuota) def __init__(self, request=None, context_seed=None, entry_point=None, *args, **kwargs): if PROJECT_GROUP_ENABLED: self.default_steps = (UpdateProjectInfo, UpdateProjectMembers, UpdateProjectGroups, UpdateProjectQuota) super(UpdateProject, self).__init__(request=request, context_seed=context_seed, entry_point=entry_point, *args, **kwargs) def format_status_message(self, message): return message % self.context.get('name', 'unknown project') @memoized.memoized_method def _get_available_roles(self, request): return api.keystone.role_list(request) def _update_project(self, request, data): # update project info try: project_id = data['project_id'] return api.keystone.tenant_update( request, project_id, name=data['name'], description=data['description'], enabled=data['enabled']) except Exception: exceptions.handle(request, ignore=True) return def _add_roles_to_users(self, request, data, project_id, user_id, role_ids, available_roles): member_step = self.get_step(PROJECT_USER_MEMBER_SLUG) current_role_ids = list(role_ids) for role in available_roles: field_name = 
member_step.get_member_field_name(role.id) # Check if the user is in the list of users with this role. if user_id in data[field_name]: # Add it if necessary if role.id not in current_role_ids: # user role has changed api.keystone.add_tenant_user_role( request, project=project_id, user=user_id, role=role.id) else: # User role is unchanged, so remove it from the # remaining roles list to avoid removing it later. index = current_role_ids.index(role.id) current_role_ids.pop(index) return current_role_ids def _remove_roles_from_user(self, request, project_id, user_id, current_role_ids): for id_to_delete in current_role_ids: api.keystone.remove_tenant_user_role( request, project=project_id, user=user_id, role=id_to_delete) def _is_removing_self_admin_role(self, request, project_id, user_id, available_roles, current_role_ids): is_current_user = user_id == request.user.id is_current_project = project_id == request.user.tenant_id available_admin_role_ids = [role.id for role in available_roles if role.name.lower() == 'admin'] admin_roles = [role for role in current_role_ids if role in available_admin_role_ids] if len(admin_roles): removing_admin = any([role in current_role_ids for role in admin_roles]) else: removing_admin = False if is_current_user and is_current_project and removing_admin: # Cannot remove "admin" role on current(admin) project msg = _('You cannot revoke your administrative privileges ' 'from the project you are currently logged into. ' 'Please switch to another project with ' 'administrative privileges or remove the ' 'administrative role manually via the CLI.') messages.warning(request, msg) return True else: return False def _update_project_members(self, request, data, project_id): # update project members users_to_modify = 0 # Project-user member step member_step = self.get_step(PROJECT_USER_MEMBER_SLUG) try: # Get our role options available_roles = self._get_available_roles(request) # Get the users currently associated with this project so we # can diff against it. users_roles = api.keystone.get_project_users_roles( request, project=project_id) users_to_modify = len(users_roles) for user_id in users_roles.keys(): # Check if there have been any changes in the roles of # Existing project members. current_role_ids = list(users_roles[user_id]) modified_role_ids = self._add_roles_to_users( request, data, project_id, user_id, current_role_ids, available_roles) # Prevent admins from doing stupid things to themselves. removing_admin = self._is_removing_self_admin_role( request, project_id, user_id, available_roles, modified_role_ids) # Otherwise go through and revoke any removed roles. if not removing_admin: self._remove_roles_from_user(request, project_id, user_id, modified_role_ids) users_to_modify -= 1 # Grant new roles on the project. for role in available_roles: field_name = member_step.get_member_field_name(role.id) # Count how many users may be added for exception handling. 
users_to_modify += len(data[field_name]) for role in available_roles: users_added = 0 field_name = member_step.get_member_field_name(role.id) for user_id in data[field_name]: if user_id not in users_roles: api.keystone.add_tenant_user_role(request, project=project_id, user=user_id, role=role.id) users_added += 1 users_to_modify -= users_added return True except Exception: if PROJECT_GROUP_ENABLED: group_msg = _(", update project groups") else: group_msg = "" exceptions.handle(request, _('Failed to modify %(users_to_modify)s' ' project members%(group_msg)s and ' 'update project quotas.') % {'users_to_modify': users_to_modify, 'group_msg': group_msg}) return False finally: auth_utils.remove_project_cache(request.user.token.id) def _update_project_groups(self, request, data, project_id, domain_id): # update project groups groups_to_modify = 0 member_step = self.get_step(PROJECT_GROUP_MEMBER_SLUG) try: available_roles = self._get_available_roles(request) # Get the groups currently associated with this project so we # can diff against it. project_groups = api.keystone.group_list(request, domain=domain_id, project=project_id) groups_to_modify = len(project_groups) for group in project_groups: # Check if there have been any changes in the roles of # Existing project members. current_roles = api.keystone.roles_for_group( self.request, group=group.id, project=project_id) current_role_ids = [role.id for role in current_roles] for role in available_roles: # Check if the group is in the list of groups with # this role. field_name = member_step.get_member_field_name(role.id) if group.id in data[field_name]: # Add it if necessary if role.id not in current_role_ids: # group role has changed api.keystone.add_group_role( request, role=role.id, group=group.id, project=project_id) else: # Group role is unchanged, so remove it from # the remaining roles list to avoid removing it # later. index = current_role_ids.index(role.id) current_role_ids.pop(index) # Revoke any removed roles. for id_to_delete in current_role_ids: api.keystone.remove_group_role(request, role=id_to_delete, group=group.id, project=project_id) groups_to_modify -= 1 # Grant new roles on the project. for role in available_roles: field_name = member_step.get_member_field_name(role.id) # Count how many groups may be added for error handling. groups_to_modify += len(data[field_name]) for role in available_roles: groups_added = 0 field_name = member_step.get_member_field_name(role.id) for group_id in data[field_name]: if not filter(lambda x: group_id == x.id, project_groups): api.keystone.add_group_role(request, role=role.id, group=group_id, project=project_id) groups_added += 1 groups_to_modify -= groups_added return True except Exception: exceptions.handle(request, _('Failed to modify %s project ' 'members, update project groups ' 'and update project quotas.') % groups_to_modify) return False def _update_project_quota(self, request, data, project_id): try: super(UpdateProject, self)._update_project_quota( request, data, project_id) return True except Exception: exceptions.handle(request, _('Modified project information and ' 'members, but unable to modify ' 'project quotas.')) return False def handle(self, request, data): # FIXME(gabriel): This should be refactored to use Python's built-in # sets and do this all in a single "roles to add" and "roles to remove" # pass instead of the multi-pass thing happening now. 
project = self._update_project(request, data) if not project: return False project_id = data['project_id'] # Use the domain_id from the project if available domain_id = getattr(project, "domain_id", '') ret = self._update_project_members(request, data, project_id) if not ret: return False if PROJECT_GROUP_ENABLED: ret = self._update_project_groups(request, data, project_id, domain_id) if not ret: return False ret = self._update_project_quota(request, data, project_id) if not ret: return False return True
apache-2.0
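A hedged sketch of the single-pass, set-based diff that the FIXME(gabriel) note in UpdateProject.handle asks for. The function name compute_role_changes and the plain-dict inputs are hypothetical; the real workflow talks to Keystone via api.keystone rather than returning pairs.

# Hypothetical sketch only: diff current vs. desired role assignments in one
# pass using Python sets, as the FIXME in UpdateProject.handle suggests.
def compute_role_changes(current, desired):
    # current/desired map user_id -> set of role ids (not Keystone objects).
    to_grant, to_revoke = [], []
    for user_id in set(current) | set(desired):
        have = current.get(user_id, set())
        want = desired.get(user_id, set())
        to_grant.extend((user_id, role_id) for role_id in want - have)
        to_revoke.extend((user_id, role_id) for role_id in have - want)
    return to_grant, to_revoke

# "u1" gains admin, "u2" loses member:
grant, revoke = compute_role_changes(
    {'u1': set(['member']), 'u2': set(['member'])},
    {'u1': set(['member', 'admin'])})
assert grant == [('u1', 'admin')] and revoke == [('u2', 'member')]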
savi-dev/nova
nova/scheduler/chance.py
6
4430
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright (c) 2010 OpenStack, LLC. # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Chance (Random) Scheduler implementation """ import random from nova import exception from nova import flags from nova.scheduler import driver FLAGS = flags.FLAGS class ChanceScheduler(driver.Scheduler): """Implements Scheduler as a random node selector.""" def _filter_hosts(self, request_spec, hosts, filter_properties): """Filter a list of hosts based on request_spec.""" ignore_hosts = filter_properties.get('ignore_hosts', []) hosts = [host for host in hosts if host not in ignore_hosts] return hosts def _schedule(self, context, topic, request_spec, filter_properties): """Picks a host that is up at random.""" elevated = context.elevated() hosts = self.hosts_up(elevated, topic) if not hosts: msg = _("Is the appropriate service running?") raise exception.NoValidHost(reason=msg) hosts = self._filter_hosts(request_spec, hosts, filter_properties) if not hosts: msg = _("Could not find another compute") raise exception.NoValidHost(reason=msg) return hosts[int(random.random() * len(hosts))] def schedule_run_instance(self, context, request_spec, admin_password, injected_files, requested_networks, is_first_time, filter_properties): """Create and run an instance or instances""" instance_uuids = request_spec.get('instance_uuids') for num, instance_uuid in enumerate(instance_uuids): request_spec['instance_properties']['launch_index'] = num try: host = self._schedule(context, 'compute', request_spec, filter_properties) updated_instance = driver.instance_update_db(context, instance_uuid, host) self.compute_rpcapi.run_instance(context, instance=updated_instance, host=host, requested_networks=requested_networks, injected_files=injected_files, admin_password=admin_password, is_first_time=is_first_time, request_spec=request_spec, filter_properties=filter_properties) except Exception as ex: # NOTE(vish): we don't reraise the exception here to make sure # that all instances in the request get set to # error properly driver.handle_schedule_error(context, ex, instance_uuid, request_spec) def schedule_prep_resize(self, context, image, request_spec, filter_properties, instance, instance_type, reservations): """Select a target for resize.""" host = self._schedule(context, 'compute', request_spec, filter_properties) self.compute_rpcapi.prep_resize(context, image, instance, instance_type, host, reservations) def schedule_create_volume(self, context, volume_id, snapshot_id, image_id): """Picks a host that is up at random.""" host = self._schedule(context, FLAGS.volume_topic, None, {}) driver.cast_to_host(context, FLAGS.volume_topic, host, 'create_volume', volume_id=volume_id, snapshot_id=snapshot_id, image_id=image_id)
apache-2.0
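A standalone sketch of ChanceScheduler's selection logic above: drop hosts listed in ignore_hosts (as _filter_hosts does), then pick uniformly at random; random.choice gives the same distribution as the hosts[int(random.random() * len(hosts))] expression. pick_host and the node names are illustrative, not nova API.

import random

def pick_host(hosts, filter_properties):
    # Mirror _filter_hosts: drop hosts the request asked to ignore.
    ignore_hosts = filter_properties.get('ignore_hosts', [])
    candidates = [host for host in hosts if host not in ignore_hosts]
    if not candidates:
        raise ValueError('no valid host')
    # Equivalent to hosts[int(random.random() * len(hosts))] above.
    return random.choice(candidates)

print(pick_host(['node1', 'node2', 'node3'], {'ignore_hosts': ['node2']}))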
dasseclab/dasseclab
clones/routersploit/tests/exploits/routers/mikrotik/test_winbox_auth_bypass_creds_disclosure.py
1
3375
from routersploit.modules.exploits.routers.mikrotik.winbox_auth_bypass_creds_disclosure import Exploit def test_check_success(tcp_target): command_mock1 = tcp_target.get_command_mock( b"\x68\x01\x00\x66\x4d\x32\x05\x00\xff\x01\x06\x00\xff\x09\x05\x07" b"\x00\xff\x09\x07\x01\x00\x00\x21\x35\x2f\x2f\x2f\x2f\x2f\x2e\x2f" b"\x2e\x2e\x2f\x2f\x2f\x2f\x2f\x2f\x2e\x2f\x2e\x2e\x2f\x2f\x2f\x2f" b"\x2f\x2f\x2e\x2f\x2e\x2e\x2f\x66\x6c\x61\x73\x68\x2f\x72\x77\x2f" b"\x73\x74\x6f\x72\x65\x2f\x75\x73\x65\x72\x2e\x64\x61\x74\x02\x00" b"\xff\x88\x02\x00\x00\x00\x00\x00\x08\x00\x00\x00\x01\x00\xff\x88" b"\x02\x00\x02\x00\x00\x00\x02\x00\x00\x00" ) command_mock1.return_value = ( b"\x37\x01\x00\x35\x4d\x32\x01\x00\xff\x88\x02\x00\x00\x00\x00\x00" b"\x08\x00\x00\x00\x02\x00\xff\x88\x02\x00\x02\x00\x00\x00\x02\x00" b"\x00\x00\x01\x00\xfe\x09\x1b\x03\x00\xff\x09\x02\x02\x00\x00\x08" b"\x36\x01\x00\x00\x06\x00\xff\x09\x05" ) command_mock2 = tcp_target.get_command_mock( b"\x3b\x01\x00\x39\x4d\x32\x05\x00\xff\x01\x06\x00\xff\x09\x06\x01" b"\x00\xfe\x09\x1b\x02\x00\x00\x08\x00\x80\x00\x00\x07\x00\xff\x09" b"\x04\x02\x00\xff\x88\x02\x00\x00\x00\x00\x00\x08\x00\x00\x00\x01" b"\x00\xff\x88\x02\x00\x02\x00\x00\x00\x02\x00\x00\x00" ) command_mock2.return_value = ( b"\xff\x01\x01\x68\x4d\x32\x01\x00\xff\x88\x02\x00\x00\x00\x00\x00" b"\x08\x00\x00\x00\x02\x00\xff\x88\x02\x00\x02\x00\x00\x00\x02\x00" b"\x00\x00\x04\x00\x00\x01\x03\x00\xff\x09\x02\x06\x00\xff\x09\x06" b"\x03\x00\x00\x30\x36\x01\x57\x00\x4d\x32\x10\x00\x00\xa8\x00\x00" b"\x1c\x00\x00\x01\x0a\x00\xfe\x00\x05\x00\x00\x09\x00\x06\x00\x00" b"\x09\x00\x0b\x00\x00\x08\xfe\xff\x07\x00\x12\x00\x00\x09\x02\x01" b"\x00\xfe\x09\x02\x02\x00\x00\x09\x03\x09\x00\xfe\x21\x00\x11\x00" b"\x00\x21\x10\x76\x08\xc6\x04\x66\xa6\x3d\x2a\xb7\xcd\xec\x68\xe2" b"\x6e\x44\x0e\x01\x00\x00\x21\x05\x75\x73\x65\x72\x31\x6d\x69\x6e" b"\x6a\x00\x4d\x32\x10\x00\x00\xa8\x00\x00\x1c\x00\x00\x01\x0a\x00" b"\xfe\x00\x05\x00\x00\x09\x00\x06\x00\x00\x09\x00\x0b\x00\x00\x08" b"\xfe\xff\x07\x00\x12\x00\x00\x09\x02\x01\x00\xfe\x09\x01\x02\x00" b"\x00\x09\x03\x09\x00\xfe\x21\x13\x73\x79\x73\x74\x65\x6d\x20\x64" b"\x65\x66\x61\x75\x6c\x74\x20\x75\x73\x65\x72\x11\x00\x00\x21\x10" b"\x29\xdb\xb3\x6f\x27\x5a\x0e\x2d\x09\xd5\xfb\x27\xb1\x44\xec\x93" b"\x01\x00\x00\x21\x05\x61\x64\x6d\x69\x6e\x72\x00\x4d\x32\x10\x00" b"\x00\x6b\xff\xa8\x00\x00\x1c\x00\x00\x01\x0a\x00\xfe\x00\x05\x00" b"\x00\x09\x00\x06\x00\x00\x09\x00\x1f\x00\x00\x08\x36\x2b\x35\x5b" b"\x0b\x00\x00\x08\xfe\xff\x07\x00\x12\x00\x00\x09\x02\x01\x00\xfe" b"\x09\x01\x02\x00\x00\x09\x03\x09\x00\xfe\x21\x13\x73\x79\x73\x74" b"\x65\x6d\x20\x64\x65\x66\x61\x75\x6c\x74\x20\x75\x73\x65\x72\x11" b"\x00\x00\x21\x10\x29\xdb\xb3\x6f\x27\x5a\x0e\x2d\x09\xd5\xfb\x27" b"\xb1\x44\xec\x93\x01\x00\x00\x21\x05\x61\x64\x6d\x69\x6e" ) exploit = Exploit() assert exploit.target == "" assert exploit.port == 8291 exploit.target = tcp_target.host exploit.port = tcp_target.port assert exploit.check() assert exploit.run() is None
gpl-2.0
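The test above pairs exact Winbox request payloads with canned responses through the tcp_target fixture's get_command_mock. A hypothetical, dependency-free sketch of that idea (FakeTcpTarget is not part of routersploit's test utilities):

class FakeTcpTarget(object):
    def __init__(self):
        self._responses = {}

    def set_response(self, request_bytes, response_bytes):
        # Register a canned reply for one exact request payload.
        self._responses[request_bytes] = response_bytes

    def handle(self, request_bytes):
        # Unknown payloads get an empty reply, like a silent service.
        return self._responses.get(request_bytes, b"")

target = FakeTcpTarget()
target.set_response(b"\x68\x01\x00", b"\x37\x01\x00")
assert target.handle(b"\x68\x01\x00") == b"\x37\x01\x00"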
dwks/silvius-backend
kaldigstserver/decoder2.py
1
8962
""" Created on May 17, 2013 @author: tanel """ import gi gi.require_version('Gst', '1.0') from gi.repository import GObject, Gst GObject.threads_init() Gst.init(None) import logging import thread import os logger = logging.getLogger(__name__) import pdb class DecoderPipeline2(object): def __init__(self, conf={}): logger.info("Creating decoder using conf: %s" % conf) self.create_pipeline(conf) self.outdir = conf.get("out-dir", None) if not os.path.exists(self.outdir): os.makedirs(self.outdir) elif not os.path.isdir(self.outdir): raise Exception("Output directory %s already exists as a file" % self.outdir) self.result_handler = None self.full_result_handler = None self.eos_handler = None self.error_handler = None self.request_id = "<undefined>" def create_pipeline(self, conf): self.appsrc = Gst.ElementFactory.make("appsrc", "appsrc") self.decodebin = Gst.ElementFactory.make("decodebin", "decodebin") self.audioconvert = Gst.ElementFactory.make("audioconvert", "audioconvert") self.audioresample = Gst.ElementFactory.make("audioresample", "audioresample") self.tee = Gst.ElementFactory.make("tee", "tee") self.queue1 = Gst.ElementFactory.make("queue", "queue1") self.filesink = Gst.ElementFactory.make("filesink", "filesink") self.queue2 = Gst.ElementFactory.make("queue", "queue2") self.asr = Gst.ElementFactory.make("kaldinnet2onlinedecoder", "asr") self.fakesink = Gst.ElementFactory.make("fakesink", "fakesink") # This needs to be set first if "use-threaded-decoder" in conf["decoder"]: self.asr.set_property("use-threaded-decoder", conf["decoder"]["use-threaded-decoder"]) decoder_config = conf.get("decoder", {}) if 'nnet-mode' in decoder_config: logger.info("Setting decoder property: %s = %s" % ('nnet-mode', decoder_config['nnet-mode'])) self.asr.set_property('nnet-mode', decoder_config['nnet-mode']) del decoder_config['nnet-mode'] for (key, val) in decoder_config.iteritems(): if key != "use-threaded-decoder": logger.info("Setting decoder property: %s = %s" % (key, val)) self.asr.set_property(key, val) self.appsrc.set_property("is-live", True) self.filesink.set_property("location", "/dev/null") logger.info('Created GStreamer elements') self.pipeline = Gst.Pipeline() for element in [self.appsrc, self.decodebin, self.audioconvert, self.audioresample, self.tee, self.queue1, self.filesink, self.queue2, self.asr, self.fakesink]: logger.debug("Adding %s to the pipeline" % element) self.pipeline.add(element) logger.info('Linking GStreamer elements') self.appsrc.link(self.decodebin) #self.appsrc.link(self.audioconvert) self.decodebin.connect('pad-added', self._connect_decoder) self.audioconvert.link(self.audioresample) self.audioresample.link(self.tee) self.tee.link(self.queue1) self.queue1.link(self.filesink) self.tee.link(self.queue2) self.queue2.link(self.asr) self.asr.link(self.fakesink) # Create bus and connect several handlers self.bus = self.pipeline.get_bus() self.bus.add_signal_watch() self.bus.enable_sync_message_emission() self.bus.connect('message::eos', self._on_eos) self.bus.connect('message::error', self._on_error) #self.bus.connect('message::cutter', self._on_cutter) self.asr.connect('partial-result', self._on_partial_result) self.asr.connect('final-result', self._on_final_result) self.asr.connect('full-final-result', self._on_full_final_result) logger.info("Setting pipeline to READY") self.pipeline.set_state(Gst.State.READY) logger.info("Set pipeline to READY") def _connect_decoder(self, element, pad): logger.info("%s: Connecting audio decoder" % self.request_id) 
pad.link(self.audioconvert.get_static_pad("sink")) logger.info("%s: Connected audio decoder" % self.request_id) def _on_partial_result(self, asr, hyp): logger.info("%s: Got partial result: %s" % (self.request_id, hyp.decode('utf8'))) if self.result_handler: self.result_handler(hyp.decode('utf8'), False) def _on_final_result(self, asr, hyp): logger.info("%s: Got final result: %s" % (self.request_id, hyp.decode('utf8'))) if self.result_handler: self.result_handler(hyp.decode('utf8'), True) def _on_full_final_result(self, asr, result_json): logger.info("%s: Got full final result: %s" % (self.request_id, result_json.decode('utf8'))) if self.full_result_handler: self.full_result_handler(result_json) def _on_error(self, bus, msg): self.error = msg.parse_error() logger.error(self.error) self.finish_request() if self.error_handler: self.error_handler(self.error[0].message) def _on_eos(self, bus, msg): logger.info('%s: Pipeline received eos signal' % self.request_id) #self.decodebin.unlink(self.audioconvert) self.finish_request() if self.eos_handler: self.eos_handler[0](self.eos_handler[1]) def get_adaptation_state(self): return self.asr.get_property("adaptation-state") def set_adaptation_state(self, adaptation_state): """Sets the adaptation state to a certain value, previously retrieved using get_adaptation_state() Should be called after init_request(..) """ return self.asr.set_property("adaptation-state", adaptation_state) def finish_request(self): logger.info("%s: Resetting decoder state" % self.request_id) if self.outdir: self.filesink.set_state(Gst.State.NULL) self.filesink.set_property('location', "/dev/null") self.filesink.set_state(Gst.State.PLAYING) self.pipeline.set_state(Gst.State.NULL) self.request_id = "<undefined>" def init_request(self, id, caps_str): self.request_id = id logger.info("%s: Initializing request" % (self.request_id)) if caps_str and len(caps_str) > 0: logger.info("%s: Setting caps to %s" % (self.request_id, caps_str)) caps = Gst.caps_from_string(caps_str) self.appsrc.set_property("caps", caps) else: #caps = Gst.caps_from_string("") self.appsrc.set_property("caps", None) #self.pipeline.set_state(Gst.State.READY) pass #self.appsrc.set_state(Gst.State.PAUSED) if self.outdir: self.pipeline.set_state(Gst.State.PAUSED) self.filesink.set_state(Gst.State.NULL) self.filesink.set_property('location', "%s/%s.raw" % (self.outdir, id)) self.filesink.set_state(Gst.State.PLAYING) #self.filesink.set_state(Gst.State.PLAYING) #self.decodebin.set_state(Gst.State.PLAYING) self.pipeline.set_state(Gst.State.PLAYING) self.filesink.set_state(Gst.State.PLAYING) # push empty buffer (to avoid hang on client disconnect) #buf = Gst.Buffer.new_allocate(None, 0, None) #self.appsrc.emit("push-buffer", buf) # reset adaptation state self.set_adaptation_state("") def process_data(self, data): logger.debug('%s: Pushing buffer of size %d to pipeline' % (self.request_id, len(data))) buf = Gst.Buffer.new_allocate(None, len(data), None) buf.fill(0, data) self.appsrc.emit("push-buffer", buf) logger.debug('%s: Pushing buffer done' % self.request_id) def end_request(self): logger.info("%s: Pushing EOS to pipeline" % self.request_id) self.appsrc.emit("end-of-stream") def set_result_handler(self, handler): self.result_handler = handler def set_full_result_handler(self, handler): self.full_result_handler = handler def set_eos_handler(self, handler, user_data=None): self.eos_handler = (handler, user_data) def set_error_handler(self, handler): self.error_handler = handler def cancel(self): logger.info("%s: Sending EOS 
to pipeline in order to cancel processing" % self.request_id) self.appsrc.emit("end-of-stream") #self.asr.set_property("silent", True) #self.pipeline.set_state(Gst.State.NULL) #if (self.pipeline.get_state() == Gst.State.PLAYING): #logger.debug("Sending EOS to pipeline") #self.pipeline.send_event(Gst.Event.new_eos()) #self.pipeline.set_state(Gst.State.READY) logger.info("%s: Cancelled pipeline" % self.request_id)
bsd-2-clause
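A hedged usage sketch for DecoderPipeline2 above: the method calls (set_result_handler, init_request, process_data, end_request) are the ones defined in the class, but the conf values, caps string, and audio file are deployment-specific placeholders, and running it requires the kaldinnet2onlinedecoder GStreamer plugin plus real model files configured under "decoder".

def print_result(hyp, final):
    # Receives partial (final=False) and final (final=True) hypotheses.
    print("%s: %s" % ("final" if final else "partial", hyp))

conf = {
    "out-dir": "/tmp/decoded",                  # where raw audio gets dumped
    "decoder": {"use-threaded-decoder": True},  # model properties go here too
}
pipeline = DecoderPipeline2(conf)
pipeline.set_result_handler(print_result)
pipeline.init_request("req-1",
                      "audio/x-raw,format=S16LE,rate=16000,channels=1")
with open("utterance.raw", "rb") as f:  # raw PCM matching the caps above
    pipeline.process_data(f.read())
pipeline.end_request()  # pushes EOS; results arrive via the handler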
tklaus/ansible
lib/ansible/playbook/role/metadata.py
80
3201
# (c) 2014 Michael DeHaan, <[email protected]> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type import os from six import iteritems, string_types from ansible.errors import AnsibleParserError from ansible.playbook.attribute import Attribute, FieldAttribute from ansible.playbook.base import Base from ansible.playbook.helpers import load_list_of_roles from ansible.playbook.role.include import RoleInclude __all__ = ['RoleMetadata'] class RoleMetadata(Base): ''' This class wraps the parsing and validation of the optional metadata within each Role (meta/main.yml). ''' _allow_duplicates = FieldAttribute(isa='bool', default=False) _dependencies = FieldAttribute(isa='list', default=[]) _galaxy_info = FieldAttribute(isa='GalaxyInfo') def __init__(self, owner=None): self._owner = owner super(RoleMetadata, self).__init__() @staticmethod def load(data, owner, variable_manager=None, loader=None): ''' Returns a new RoleMetadata object based on the datastructure passed in. ''' if not isinstance(data, dict): raise AnsibleParserError("the 'meta/main.yml' for role %s is not a dictionary" % owner.get_name()) m = RoleMetadata(owner=owner).load_data(data, variable_manager=variable_manager, loader=loader) return m def _load_dependencies(self, attr, ds): ''' This is a helper loading function for the dependencies list, which returns a list of RoleInclude objects ''' if ds is None: ds = [] current_role_path = None if self._owner: current_role_path = os.path.dirname(self._owner._role_path) return load_list_of_roles(ds, play=self._owner._play, current_role_path=current_role_path, variable_manager=self._variable_manager, loader=self._loader) def _load_galaxy_info(self, attr, ds): ''' This is a helper loading function for the galaxy info entry in the metadata, which returns a GalaxyInfo object rather than a simple dictionary. ''' return ds def serialize(self): return dict( allow_duplicates = self._allow_duplicates, dependencies = self._dependencies, ) def deserialize(self, data): setattr(self, 'allow_duplicates', data.get('allow_duplicates', False)) setattr(self, 'dependencies', data.get('dependencies', []))
gpl-3.0
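RoleMetadata.load() above refuses any meta/main.yml that does not parse to a dict; a small hypothetical sketch of that failure path (DummyOwner stands in for a real ansible Role object, which is all load() needs for the error message):

class DummyOwner(object):
    def get_name(self):
        return "myrole"

try:
    RoleMetadata.load(["not", "a", "dict"], owner=DummyOwner())
except AnsibleParserError as e:
    # -> "the 'meta/main.yml' for role myrole is not a dictionary"
    print(e)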
agiliq/nginx-python-buildpack
vendor/pip-1.5.4/pip/_vendor/html5lib/treebuilders/etree.py
915
12621
from __future__ import absolute_import, division, unicode_literals from pip._vendor.six import text_type import re from . import _base from .. import ihatexml from .. import constants from ..constants import namespaces from ..utils import moduleFactoryFactory tag_regexp = re.compile("{([^}]*)}(.*)") def getETreeBuilder(ElementTreeImplementation, fullTree=False): ElementTree = ElementTreeImplementation ElementTreeCommentType = ElementTree.Comment("asd").tag class Element(_base.Node): def __init__(self, name, namespace=None): self._name = name self._namespace = namespace self._element = ElementTree.Element(self._getETreeTag(name, namespace)) if namespace is None: self.nameTuple = namespaces["html"], self._name else: self.nameTuple = self._namespace, self._name self.parent = None self._childNodes = [] self._flags = [] def _getETreeTag(self, name, namespace): if namespace is None: etree_tag = name else: etree_tag = "{%s}%s" % (namespace, name) return etree_tag def _setName(self, name): self._name = name self._element.tag = self._getETreeTag(self._name, self._namespace) def _getName(self): return self._name name = property(_getName, _setName) def _setNamespace(self, namespace): self._namespace = namespace self._element.tag = self._getETreeTag(self._name, self._namespace) def _getNamespace(self): return self._namespace namespace = property(_getNamespace, _setNamespace) def _getAttributes(self): return self._element.attrib def _setAttributes(self, attributes): # Delete existing attributes first # XXX - there may be a better way to do this... for key in list(self._element.attrib.keys()): del self._element.attrib[key] for key, value in attributes.items(): if isinstance(key, tuple): name = "{%s}%s" % (key[2], key[1]) else: name = key self._element.set(name, value) attributes = property(_getAttributes, _setAttributes) def _getChildNodes(self): return self._childNodes def _setChildNodes(self, value): del self._element[:] self._childNodes = [] for element in value: self.insertChild(element) childNodes = property(_getChildNodes, _setChildNodes) def hasContent(self): """Return true if the node has children or text""" return bool(self._element.text or len(self._element)) def appendChild(self, node): self._childNodes.append(node) self._element.append(node._element) node.parent = self def insertBefore(self, node, refNode): index = list(self._element).index(refNode._element) self._element.insert(index, node._element) node.parent = self def removeChild(self, node): self._element.remove(node._element) node.parent = None def insertText(self, data, insertBefore=None): if not(len(self._element)): if not self._element.text: self._element.text = "" self._element.text += data elif insertBefore is None: # Insert the text as the tail of the last child element if not self._element[-1].tail: self._element[-1].tail = "" self._element[-1].tail += data else: # Insert the text before the specified node children = list(self._element) index = children.index(insertBefore._element) if index > 0: if not self._element[index - 1].tail: self._element[index - 1].tail = "" self._element[index - 1].tail += data else: if not self._element.text: self._element.text = "" self._element.text += data def cloneNode(self): element = type(self)(self.name, self.namespace) for name, value in self.attributes.items(): element.attributes[name] = value return element def reparentChildren(self, newParent): if newParent.childNodes: newParent.childNodes[-1]._element.tail += self._element.text else: if not newParent._element.text: newParent._element.text 
= "" if self._element.text is not None: newParent._element.text += self._element.text self._element.text = "" _base.Node.reparentChildren(self, newParent) class Comment(Element): def __init__(self, data): # Use the superclass constructor to set all properties on the # wrapper element self._element = ElementTree.Comment(data) self.parent = None self._childNodes = [] self._flags = [] def _getData(self): return self._element.text def _setData(self, value): self._element.text = value data = property(_getData, _setData) class DocumentType(Element): def __init__(self, name, publicId, systemId): Element.__init__(self, "<!DOCTYPE>") self._element.text = name self.publicId = publicId self.systemId = systemId def _getPublicId(self): return self._element.get("publicId", "") def _setPublicId(self, value): if value is not None: self._element.set("publicId", value) publicId = property(_getPublicId, _setPublicId) def _getSystemId(self): return self._element.get("systemId", "") def _setSystemId(self, value): if value is not None: self._element.set("systemId", value) systemId = property(_getSystemId, _setSystemId) class Document(Element): def __init__(self): Element.__init__(self, "DOCUMENT_ROOT") class DocumentFragment(Element): def __init__(self): Element.__init__(self, "DOCUMENT_FRAGMENT") def testSerializer(element): rv = [] def serializeElement(element, indent=0): if not(hasattr(element, "tag")): element = element.getroot() if element.tag == "<!DOCTYPE>": if element.get("publicId") or element.get("systemId"): publicId = element.get("publicId") or "" systemId = element.get("systemId") or "" rv.append("""<!DOCTYPE %s "%s" "%s">""" % (element.text, publicId, systemId)) else: rv.append("<!DOCTYPE %s>" % (element.text,)) elif element.tag == "DOCUMENT_ROOT": rv.append("#document") if element.text is not None: rv.append("|%s\"%s\"" % (' ' * (indent + 2), element.text)) if element.tail is not None: raise TypeError("Document node cannot have tail") if hasattr(element, "attrib") and len(element.attrib): raise TypeError("Document node cannot have attributes") elif element.tag == ElementTreeCommentType: rv.append("|%s<!-- %s -->" % (' ' * indent, element.text)) else: assert isinstance(element.tag, text_type), \ "Expected unicode, got %s, %s" % (type(element.tag), element.tag) nsmatch = tag_regexp.match(element.tag) if nsmatch is None: name = element.tag else: ns, name = nsmatch.groups() prefix = constants.prefixes[ns] name = "%s %s" % (prefix, name) rv.append("|%s<%s>" % (' ' * indent, name)) if hasattr(element, "attrib"): attributes = [] for name, value in element.attrib.items(): nsmatch = tag_regexp.match(name) if nsmatch is not None: ns, name = nsmatch.groups() prefix = constants.prefixes[ns] attr_string = "%s %s" % (prefix, name) else: attr_string = name attributes.append((attr_string, value)) for name, value in sorted(attributes): rv.append('|%s%s="%s"' % (' ' * (indent + 2), name, value)) if element.text: rv.append("|%s\"%s\"" % (' ' * (indent + 2), element.text)) indent += 2 for child in element: serializeElement(child, indent) if element.tail: rv.append("|%s\"%s\"" % (' ' * (indent - 2), element.tail)) serializeElement(element, 0) return "\n".join(rv) def tostring(element): """Serialize an element and its child nodes to a string""" rv = [] filter = ihatexml.InfosetFilter() def serializeElement(element): if isinstance(element, ElementTree.ElementTree): element = element.getroot() if element.tag == "<!DOCTYPE>": if element.get("publicId") or element.get("systemId"): publicId = element.get("publicId") or "" 
systemId = element.get("systemId") or "" rv.append("""<!DOCTYPE %s PUBLIC "%s" "%s">""" % (element.text, publicId, systemId)) else: rv.append("<!DOCTYPE %s>" % (element.text,)) elif element.tag == "DOCUMENT_ROOT": if element.text is not None: rv.append(element.text) if element.tail is not None: raise TypeError("Document node cannot have tail") if hasattr(element, "attrib") and len(element.attrib): raise TypeError("Document node cannot have attributes") for child in element: serializeElement(child) elif element.tag == ElementTreeCommentType: rv.append("<!--%s-->" % (element.text,)) else: # This is assumed to be an ordinary element if not element.attrib: rv.append("<%s>" % (filter.fromXmlName(element.tag),)) else: attr = " ".join(["%s=\"%s\"" % ( filter.fromXmlName(name), value) for name, value in element.attrib.items()]) rv.append("<%s %s>" % (element.tag, attr)) if element.text: rv.append(element.text) for child in element: serializeElement(child) rv.append("</%s>" % (element.tag,)) if element.tail: rv.append(element.tail) serializeElement(element) return "".join(rv) class TreeBuilder(_base.TreeBuilder): documentClass = Document doctypeClass = DocumentType elementClass = Element commentClass = Comment fragmentClass = DocumentFragment implementation = ElementTreeImplementation def testSerializer(self, element): return testSerializer(element) def getDocument(self): if fullTree: return self.document._element else: if self.defaultNamespace is not None: return self.document._element.find( "{%s}html" % self.defaultNamespace) else: return self.document._element.find("html") def getFragment(self): return _base.TreeBuilder.getFragment(self)._element return locals() getETreeModule = moduleFactoryFactory(getETreeBuilder)
mit
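A hedged sketch of driving the wrapper classes above directly through the module factory, using the stdlib ElementTree implementation. Normally html5lib selects this builder for you via treebuilders.getTreeBuilder("etree"); the vendored import path is an assumption based on this file's location in pip 1.5.4.

import xml.etree.ElementTree as ElementTree
from pip._vendor.html5lib.treebuilders import etree  # this module

mod = etree.getETreeModule(ElementTree, fullTree=False)
el = mod.Element("div")           # wraps an ElementTree element
el.insertText("hello")            # sets .text on the underlying element
print(mod.tostring(el._element))  # -> <div>hello</div>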
pdubroy/kurt
build/MacOS/PyInstaller/pyinstaller-svn-r812/Build.py
1
42370
#!/usr/bin/env python # # Build packages using spec files # # Copyright (C) 2005, Giovanni Bajo # Based on previous work under copyright (c) 1999, 2002 McMillan Enterprises, Inc. # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA import sys import os import shutil import pprint import time import py_compile import tempfile try: from hashlib import md5 except ImportError: from md5 import new as md5 import UserList import mf import archive import iu import carchive import bindepend STRINGTYPE = type('') TUPLETYPE = type((None,)) UNCOMPRESSED, COMPRESSED = range(2) # todo: use pkg_resources here HOMEPATH = os.path.dirname(sys.argv[0]) SPECPATH = None BUILDPATH = None WARNFILE = None rthooks = {} iswin = sys.platform[:3] == 'win' cygwin = sys.platform == 'cygwin' def system(cmd): # This workaround is required because NT shell doesn't work with commands # that start with double quotes (required if there are spaces inside the # command path) if iswin: cmd = 'echo on && ' + cmd os.system(cmd) def _save_data(filename, data): outf = open(filename, 'w') pprint.pprint(data, outf) outf.close() def _load_data(filename): return eval(open(filename, 'r').read().replace("\r\n","\n")) def setupUPXFlags(): f = os.environ.get("UPX", "") is24 = hasattr(sys, "version_info") and sys.version_info[:2] >= (2,4) if iswin and is24: # Binaries built with Visual Studio 7.1 require --strip-loadconf # or they won't compress. Configure.py makes sure that UPX is new # enough to support --strip-loadconf. f = "--strip-loadconf " + f # Do not compress any icon, so that additional icons in the executable # can still be externally bound f = "--compress-icons=0 " + f f = "--best " + f os.environ["UPX"] = f def mtime(fnm): try: return os.stat(fnm)[8] except: return 0 def absnormpath(apath): return os.path.abspath(os.path.normpath(apath)) def compile_pycos(toc): """Given a TOC or equivalent list of tuples, generates all the required pyc/pyo files, writing in a local directory if required, and returns the list of tuples with the updated pathnames. """ global BUILDPATH # For those modules that need to be rebuilt, use the build directory # PyInstaller creates during the build process. basepath = "/".join([BUILDPATH, "localpycos"]) new_toc = [] for (nm, fnm, typ) in toc: # Trim the terminal "c" or "o" source_fnm = fnm[:-1] # If the source is newer than the compiled, or the compiled doesn't # exist, we need to perform a build ourselves. if mtime(source_fnm) > mtime(fnm): try: py_compile.compile(source_fnm) except IOError: # If we're compiling on a system directory, probably we don't # have write permissions; thus we compile to a local directory # and change the TOC entry accordingly. 
ext = os.path.splitext(fnm)[1] if "__init__" not in fnm: # If it's a normal module, use last part of the qualified # name as module name and the first as leading path leading, mod_name = nm.split(".")[:-1], nm.split(".")[-1] else: # In case of a __init__ module, use all the qualified name # as leading path and use "__init__" as the module name leading, mod_name = nm.split("."), "__init__" leading.insert(0, basepath) leading = "/".join(leading) if not os.path.exists(leading): os.makedirs(leading) fnm = "/".join([leading, mod_name + ext]) py_compile.compile(source_fnm, fnm) new_toc.append((nm, fnm, typ)) return new_toc #--- functions for checking guts --- def _check_guts_eq(attr, old, new, last_build): """ rebuild is required if values differ """ if old != new: print "building because %s changed" % attr return True return False def _check_guts_toc_mtime(attr, old, toc, last_build, pyc=0): """ rebuild is required if mtimes of files listed in old toc are newer than last_build if pyc=1, check for .py files, too """ for (nm, fnm, typ) in old: if mtime(fnm) > last_build: print "building because %s changed" % fnm return True elif pyc and mtime(fnm[:-1]) > last_build: print "building because %s changed" % fnm[:-1] return True return False def _check_guts_toc(attr, old, toc, last_build, pyc=0): """ rebuild is required if either toc content changed if mtimes of files listed in old toc are newer than last_build if pyc=1, check for .py files, too """ return _check_guts_eq (attr, old, toc, last_build) \ or _check_guts_toc_mtime(attr, old, toc, last_build, pyc=pyc) #-- class Target: invcnum = 0 def __init__(self): self.invcnum = Target.invcnum Target.invcnum += 1 self.out = os.path.join(BUILDPATH, 'out%s%d.toc' % (self.__class__.__name__, self.invcnum)) self.outnm = os.path.basename(self.out) self.dependencies = TOC() def __postinit__(self): print "checking %s" % (self.__class__.__name__,) if self.check_guts(mtime(self.out)): self.assemble() GUTS = [] def check_guts(self, last_build): pass def get_guts(self, last_build, missing ='missing or bad'): """ returns None if guts have changed """ try: data = _load_data(self.out) except: print "building because", os.path.basename(self.out), missing return None if len(data) != len(self.GUTS): print "building because %s is bad" % self.outnm return None for i in range(len(self.GUTS)): attr, func = self.GUTS[i] if func is None: # no check for this value continue if func(attr, data[i], getattr(self, attr), last_build): return None return data class Analysis(Target): def __init__(self, scripts=None, pathex=None, hookspath=None, excludes=None): Target.__init__(self) self.inputs = scripts for script in scripts: if not os.path.exists(script): raise ValueError, "script '%s' not found" % script self.pathex = [] if pathex: for path in pathex: self.pathex.append(absnormpath(path)) sys.pathex = self.pathex[:] self.hookspath = hookspath self.excludes = excludes self.scripts = TOC() self.pure = TOC() self.binaries = TOC() self.zipfiles = TOC() self.datas = TOC() self.__postinit__() GUTS = (('inputs', _check_guts_eq), ('pathex', _check_guts_eq), ('hookspath', _check_guts_eq), ('excludes', _check_guts_eq), ('scripts', _check_guts_toc_mtime), ('pure', lambda *args: apply(_check_guts_toc_mtime, args, {'pyc': 1 } )), ('binaries', _check_guts_toc_mtime), ('zipfiles', _check_guts_toc_mtime), ('datas', _check_guts_toc_mtime), ) def check_guts(self, last_build): if last_build == 0: print "building %s because %s non-existent" % (self.__class__.__name__, self.outnm) return True for fnm in 
self.inputs: if mtime(fnm) > last_build: print "building because %s changed" % fnm return True data = Target.get_guts(self, last_build) if not data: return True scripts, pure, binaries, zipfiles, datas = data[-5:] self.scripts = TOC(scripts) self.pure = TOC(pure) self.binaries = TOC(binaries) self.zipfiles = TOC(zipfiles) self.datas = TOC(datas) return False def assemble(self): print "running Analysis", os.path.basename(self.out) # Reset seen variable to correctly discover dependencies # if there are multiple Analysis in a single specfile. bindepend.seen = {} paths = self.pathex for i in range(len(paths)): # FIXME: isn't self.pathex already norm-abs-pathed? paths[i] = absnormpath(paths[i]) ################################################### # Scan inputs and prepare: dirs = {} # input directories pynms = [] # python filenames with no extension for script in self.inputs: if not os.path.exists(script): print "Analysis: script %s not found!" % script sys.exit(1) d, base = os.path.split(script) if not d: d = os.getcwd() d = absnormpath(d) pynm, ext = os.path.splitext(base) dirs[d] = 1 pynms.append(pynm) ################################################### # Initialize analyzer and analyze scripts analyzer = mf.ImportTracker(dirs.keys()+paths, self.hookspath, self.excludes, target_platform=target_platform) #print analyzer.path scripts = [] # will contain scripts to bundle for i in range(len(self.inputs)): script = self.inputs[i] print "Analyzing:", script analyzer.analyze_script(script) scripts.append((pynms[i], script, 'PYSOURCE')) ################################################### # Fills pure, binaries and rthookcs lists to TOC pure = [] # pure python modules binaries = [] # binaries to bundle zipfiles = [] # zipfiles to bundle datas = [] # datafiles to bundle rthooks = [] # rthooks if needed for modnm, mod in analyzer.modules.items(): # FIXME: why can we have a mod == None here? if mod is not None: hooks = findRTHook(modnm) #XXX if hooks: rthooks.extend(hooks) datas.extend(mod.datas) if isinstance(mod, mf.BuiltinModule): pass else: fnm = mod.__file__ if isinstance(mod, mf.ExtensionModule): binaries.append((mod.__name__, fnm, 'EXTENSION')) elif isinstance(mod, (mf.PkgInZipModule, mf.PyInZipModule)): zipfiles.append(("eggs/" + os.path.basename(str(mod.owner)), str(mod.owner), 'ZIPFILE')) else: # mf.PyModule instances expose a list of binary # dependencies, most probably shared libraries accessed # via ctypes. Add them to the overall required binaries. binaries.extend(mod.binaries) if modnm != '__main__': pure.append((modnm, fnm, 'PYMODULE')) binaries.extend(bindepend.Dependencies(binaries, platform=target_platform)) self.fixMissingPythonLib(binaries) if zipfiles: scripts[-1:-1] = [("_pyi_egg_install.py", os.path.join(HOMEPATH, "support/_pyi_egg_install.py"), 'PYSOURCE')] # Add realtime hooks just before the last script (which is # the entrypoint of the application). scripts[-1:-1] = rthooks self.scripts = TOC(scripts) self.pure = TOC(pure) self.binaries = TOC(binaries) self.zipfiles = TOC(zipfiles) self.datas = TOC(datas) try: # read .toc oldstuff = _load_data(self.out) except: oldstuff = None self.pure = TOC(compile_pycos(self.pure)) newstuff = (self.inputs, self.pathex, self.hookspath, self.excludes, self.scripts, self.pure, self.binaries, self.zipfiles, self.datas) if oldstuff != newstuff: _save_data(self.out, newstuff) wf = open(WARNFILE, 'w') for ln in analyzer.getwarnings(): wf.write(ln+'\n') wf.close() print "Warnings written to %s" % WARNFILE return 1 print self.out, "no change!" 
return 0 def fixMissingPythonLib(self, binaries): """Add the Python library if missing from the binaries. Some linux distributions (e.g. debian-based) statically build the Python executable to the libpython, so bindepend doesn't include it in its output. """ # minimal patch for OSX. Loader expects "Python" framework lib to be bundled if target_platform == "darwin": lib = os.path.join(sys.exec_prefix,'Python') try: exists = os.stat(lib) binaries.append(('Python', lib, 'BINARY')) except: print 'Warning: could not find python framework to bundle' if target_platform != 'linux2': return name = 'libpython%d.%d.so' % sys.version_info[:2] for (nm, fnm, typ) in binaries: if typ == 'BINARY' and name in fnm: # lib found return lib = bindepend.findLibrary(name) if lib is None: raise IOError("Python library not found!") binaries.append((os.path.split(lib)[1], lib, 'BINARY')) def findRTHook(modnm): hooklist = rthooks.get(modnm) if hooklist: rslt = [] for script in hooklist: nm = os.path.basename(script) nm = os.path.splitext(nm)[0] if os.path.isabs(script): path = script else: path = os.path.join(HOMEPATH, script) rslt.append((nm, path, 'PYSOURCE')) return rslt return None class PYZ(Target): typ = 'PYZ' def __init__(self, toc, name=None, level=9, crypt=None): Target.__init__(self) self.toc = toc self.name = name if name is None: self.name = self.out[:-3] + 'pyz' if config['useZLIB']: self.level = level else: self.level = 0 if config['useCrypt'] and crypt is not None: self.crypt = archive.Keyfile(crypt).key else: self.crypt = None self.dependencies = compile_pycos(config['PYZ_dependencies']) self.__postinit__() GUTS = (('name', _check_guts_eq), ('level', _check_guts_eq), ('crypt', _check_guts_eq), ('toc', _check_guts_toc), # todo: pyc=1 ) def check_guts(self, last_build): if not os.path.exists(self.name): print "rebuilding %s because %s is missing" % (self.outnm, os.path.basename(self.name)) return True data = Target.get_guts(self, last_build) if not data: return True return False def assemble(self): print "building PYZ", os.path.basename(self.out) pyz = archive.ZlibArchive(level=self.level, crypt=self.crypt) toc = self.toc - config['PYZ_dependencies'] pyz.build(self.name, toc) _save_data(self.out, (self.name, self.level, self.crypt, self.toc)) return 1 def cacheDigest(fnm): data = open(fnm, "rb").read() digest = md5(data).digest() return digest def checkCache(fnm, strip, upx): # On darwin a cache is required anyway to keep the libaries # with relative install names if not strip and not upx and sys.platform != 'darwin': return fnm if strip: strip = 1 else: strip = 0 if upx: upx = 1 else: upx = 0 # Load cache index cachedir = os.path.join(HOMEPATH, 'bincache%d%d' % (strip, upx)) if not os.path.exists(cachedir): os.makedirs(cachedir) cacheindexfn = os.path.join(cachedir, "index.dat") if os.path.exists(cacheindexfn): cache_index = _load_data(cacheindexfn) else: cache_index = {} # Verify if the file we're looking for is present in the cache. basenm = os.path.normcase(os.path.basename(fnm)) digest = cacheDigest(fnm) cachedfile = os.path.join(cachedir, basenm) cmd = None if cache_index.has_key(basenm): if digest != cache_index[basenm]: os.remove(cachedfile) else: return cachedfile if upx: if strip: fnm = checkCache(fnm, 1, 0) bestopt = "--best" # FIXME: Linux builds of UPX do not seem to contain LZMA (they assert out) # A better configure-time check is due. 
if config["hasUPX"] >= (3,) and os.name == "nt": bestopt = "--lzma" upx_executable = "upx" if config.get('upx_dir'): upx_executable = os.path.join(config['upx_dir'], upx_executable) cmd = '"' + upx_executable + '" ' + bestopt + " -q \"%s\"" % cachedfile else: if strip: cmd = "strip \"%s\"" % cachedfile shutil.copy2(fnm, cachedfile) os.chmod(cachedfile, 0755) if cmd: system(cmd) # update cache index cache_index[basenm] = digest _save_data(cacheindexfn, cache_index) return cachedfile UNCOMPRESSED, COMPRESSED, ENCRYPTED = range(3) class PKG(Target): typ = 'PKG' xformdict = {'PYMODULE' : 'm', 'PYSOURCE' : 's', 'EXTENSION' : 'b', 'PYZ' : 'z', 'PKG' : 'a', 'DATA': 'x', 'BINARY': 'b', 'ZIPFILE': 'Z', 'EXECUTABLE': 'b'} def __init__(self, toc, name=None, cdict=None, exclude_binaries=0, strip_binaries=0, upx_binaries=0, crypt=0): Target.__init__(self) self.toc = toc self.cdict = cdict self.name = name self.exclude_binaries = exclude_binaries self.strip_binaries = strip_binaries self.upx_binaries = upx_binaries self.crypt = crypt if name is None: self.name = self.out[:-3] + 'pkg' if self.cdict is None: if config['useZLIB']: self.cdict = {'EXTENSION':COMPRESSED, 'DATA':COMPRESSED, 'BINARY':COMPRESSED, 'EXECUTABLE':COMPRESSED, 'PYSOURCE':COMPRESSED, 'PYMODULE':COMPRESSED } if self.crypt: self.cdict['PYSOURCE'] = ENCRYPTED self.cdict['PYMODULE'] = ENCRYPTED else: self.cdict = { 'PYSOURCE':UNCOMPRESSED } self.__postinit__() GUTS = (('name', _check_guts_eq), ('cdict', _check_guts_eq), ('toc', _check_guts_toc_mtime), ('exclude_binaries', _check_guts_eq), ('strip_binaries', _check_guts_eq), ('upx_binaries', _check_guts_eq), ('crypt', _check_guts_eq), ) def check_guts(self, last_build): if not os.path.exists(self.name): print "rebuilding %s because %s is missing" % (self.outnm, os.path.basename(self.name)) return 1 data = Target.get_guts(self, last_build) if not data: return True # todo: toc equal return False def assemble(self): print "building PKG", os.path.basename(self.name) trash = [] mytoc = [] toc = TOC() for item in self.toc: inm, fnm, typ = item if typ == 'EXTENSION': binext = os.path.splitext(fnm)[1] if not os.path.splitext(inm)[1] == binext: inm = inm + binext toc.append((inm, fnm, typ)) seen = {} for inm, fnm, typ in toc: if typ in ('BINARY', 'EXTENSION'): if self.exclude_binaries: self.dependencies.append((inm, fnm, typ)) else: fnm = checkCache(fnm, self.strip_binaries, self.upx_binaries and ( iswin or cygwin ) and config['hasUPX']) # Avoid importing the same binary extension twice. This might # happen if they come from different sources (eg. once from # binary dependence, and once from direct import). 
if typ == 'BINARY' and seen.has_key(fnm): continue seen[fnm] = 1 mytoc.append((inm, fnm, self.cdict.get(typ,0), self.xformdict.get(typ,'b'))) elif typ == 'OPTION': mytoc.append((inm, '', 0, 'o')) else: mytoc.append((inm, fnm, self.cdict.get(typ,0), self.xformdict.get(typ,'b'))) archive = carchive.CArchive() archive.build(self.name, mytoc) _save_data(self.out, (self.name, self.cdict, self.toc, self.exclude_binaries, self.strip_binaries, self.upx_binaries, self.crypt)) for item in trash: os.remove(item) return 1 class EXE(Target): typ = 'EXECUTABLE' exclude_binaries = 0 append_pkg = 1 def __init__(self, *args, **kws): Target.__init__(self) self.console = kws.get('console',1) self.debug = kws.get('debug',0) self.name = kws.get('name',None) self.icon = kws.get('icon',None) self.versrsrc = kws.get('version',None) self.strip = kws.get('strip',None) self.upx = kws.get('upx',None) self.crypt = kws.get('crypt', 0) self.exclude_binaries = kws.get('exclude_binaries',0) self.append_pkg = kws.get('append_pkg', self.append_pkg) if self.name is None: self.name = self.out[:-3] + 'exe' if not os.path.isabs(self.name): self.name = os.path.join(SPECPATH, self.name) if target_iswin or cygwin: self.pkgname = self.name[:-3] + 'pkg' else: self.pkgname = self.name + '.pkg' self.toc = TOC() for arg in args: if isinstance(arg, TOC): self.toc.extend(arg) elif isinstance(arg, Target): self.toc.append((os.path.basename(arg.name), arg.name, arg.typ)) self.toc.extend(arg.dependencies) else: self.toc.extend(arg) self.toc.extend(config['EXE_dependencies']) self.pkg = PKG(self.toc, cdict=kws.get('cdict',None), exclude_binaries=self.exclude_binaries, strip_binaries=self.strip, upx_binaries=self.upx, crypt=self.crypt) self.dependencies = self.pkg.dependencies self.__postinit__() GUTS = (('name', _check_guts_eq), ('console', _check_guts_eq), ('debug', _check_guts_eq), ('icon', _check_guts_eq), ('versrsrc', _check_guts_eq), ('strip', _check_guts_eq), ('upx', _check_guts_eq), ('crypt', _check_guts_eq), ('mtm', None,), # checked bellow ) def check_guts(self, last_build): if not os.path.exists(self.name): print "rebuilding %s because %s missing" % (self.outnm, os.path.basename(self.name)) return 1 if not self.append_pkg and not os.path.exists(self.pkgname): print "rebuilding because %s missing" % ( os.path.basename(self.pkgname),) return 1 data = Target.get_guts(self, last_build) if not data: return True icon, versrsrc = data[3:5] if (icon or versrsrc) and not config['hasRsrcUpdate']: # todo: really ignore :-) print "ignoring icon and version resources = platform not capable" mtm = data[-1] crypt = data[-2] if crypt != self.crypt: print "rebuilding %s because crypt option changed" % outnm return 1 if mtm != mtime(self.name): print "rebuilding", self.outnm, "because mtimes don't match" return True if mtm < mtime(self.pkg.out): print "rebuilding", self.outnm, "because pkg is more recent" return True return False def _bootloader_postfix(self, exe): if target_iswin: exe = exe + "_" is24 = hasattr(sys, "version_info") and sys.version_info[:2] >= (2,4) exe = exe + "67"[is24] exe = exe + "rd"[self.debug] exe = exe + "wc"[self.console] else: if not self.console: exe = exe + 'w' if self.debug: exe = exe + '_d' return exe def assemble(self): print "building EXE from", os.path.basename(self.out) trash = [] if not os.path.exists(os.path.dirname(self.name)): os.makedirs(os.path.dirname(self.name)) outf = open(self.name, 'wb') exe = self._bootloader_postfix('support/loader/run') exe = os.path.join(HOMEPATH, exe) if target_iswin or cygwin: exe 
= exe + '.exe' if config['hasRsrcUpdate']: if self.icon: tmpnm = tempfile.mktemp() shutil.copy2(exe, tmpnm) os.chmod(tmpnm, 0755) icon.CopyIcons(tmpnm, self.icon) trash.append(tmpnm) exe = tmpnm if self.versrsrc: tmpnm = tempfile.mktemp() shutil.copy2(exe, tmpnm) os.chmod(tmpnm, 0755) versionInfo.SetVersion(tmpnm, self.versrsrc) trash.append(tmpnm) exe = tmpnm exe = checkCache(exe, self.strip, self.upx and config['hasUPX']) self.copy(exe, outf) if self.append_pkg: print "Appending archive to EXE", self.name self.copy(self.pkg.name, outf) else: print "Copying archive to", self.pkgname shutil.copy2(self.pkg.name, self.pkgname) outf.close() os.chmod(self.name, 0755) _save_data(self.out, (self.name, self.console, self.debug, self.icon, self.versrsrc, self.strip, self.upx, self.crypt, mtime(self.name))) for item in trash: os.remove(item) return 1 def copy(self, fnm, outf): inf = open(fnm, 'rb') while 1: data = inf.read(64*1024) if not data: break outf.write(data) class DLL(EXE): def assemble(self): print "building DLL", os.path.basename(self.out) outf = open(self.name, 'wb') dll = self._bootloader_postfix('support/loader/inprocsrvr') dll = os.path.join(HOMEPATH, dll) + '.dll' self.copy(dll, outf) self.copy(self.pkg.name, outf) outf.close() os.chmod(self.name, 0755) _save_data(self.out, (self.name, self.console, self.debug, self.icon, self.versrsrc, self.strip, self.upx, mtime(self.name))) return 1 class COLLECT(Target): def __init__(self, *args, **kws): Target.__init__(self) self.name = kws.get('name',None) if self.name is None: self.name = 'dist_' + self.out[:-4] self.strip_binaries = kws.get('strip',0) self.upx_binaries = kws.get('upx',0) if not os.path.isabs(self.name): self.name = os.path.join(SPECPATH, self.name) self.toc = TOC() for arg in args: if isinstance(arg, TOC): self.toc.extend(arg) elif isinstance(arg, Target): self.toc.append((os.path.basename(arg.name), arg.name, arg.typ)) if isinstance(arg, EXE) and not arg.append_pkg: self.toc.append((os.path.basename(arg.pkgname), arg.pkgname, 'PKG')) self.toc.extend(arg.dependencies) else: self.toc.extend(arg) self.__postinit__() GUTS = (('name', _check_guts_eq), ('strip_binaries', _check_guts_eq), ('upx_binaries', _check_guts_eq), ('toc', _check_guts_eq), # additional check below ) def check_guts(self, last_build): data = Target.get_guts(self, last_build) if not data: return True toc = data[-1] for inm, fnm, typ in self.toc: if typ == 'EXTENSION': ext = os.path.splitext(fnm)[1] test = os.path.join(self.name, inm+ext) else: test = os.path.join(self.name, os.path.basename(fnm)) if not os.path.exists(test): print "building %s because %s is missing" % (self.outnm, test) return 1 if mtime(fnm) > mtime(test): print "building %s because %s is more recent" % (self.outnm, fnm) return 1 return 0 def assemble(self): print "building COLLECT", os.path.basename(self.out) if not os.path.exists(self.name): os.makedirs(self.name) toc = TOC() for inm, fnm, typ in self.toc: if typ == 'EXTENSION': binext = os.path.splitext(fnm)[1] if not os.path.splitext(inm)[1] == binext: inm = inm + binext toc.append((inm, fnm, typ)) for inm, fnm, typ in toc: tofnm = os.path.join(self.name, inm) todir = os.path.dirname(tofnm) if not os.path.exists(todir): os.makedirs(todir) if typ in ('EXTENSION', 'BINARY'): fnm = checkCache(fnm, self.strip_binaries, self.upx_binaries and ( iswin or cygwin ) and config['hasUPX']) shutil.copy2(fnm, tofnm) if typ in ('EXTENSION', 'BINARY'): os.chmod(tofnm, 0755) _save_data(self.out, (self.name, self.strip_binaries, self.upx_binaries, 
self.toc)) return 1 class BUNDLE(Target): def __init__(self, *args, **kws): Target.__init__(self) self.appname = kws.get("appname", None) self.version = kws.get("version", "0.0.0") self.toc = TOC() for arg in args: if isinstance(arg, EXE): if self.appname is None: self.appname = "Mac%s" % (arg.name,) self.name = os.path.join(os.path.dirname(SPECPATH), self.appname + ".app") self.exename = arg.name self.toc.append((os.path.basename(arg.name), arg.name, arg.typ)) self.toc.extend(arg.dependencies) else: print "unsupported entry %s", arg.__class__.__name__ self.__postinit__() GUTS = (('toc', _check_guts_eq), # additional check below ) def check_guts(self, last_build): data = Target.get_guts(self, last_build) if not data: return True toc = data[-1] for inm, fnm, typ in self.toc: test = os.path.join(self.name, os.path.basename(fnm)) if not os.path.exists(test): print "building %s because %s is missing" % (self.outnm, test) return 1 if mtime(fnm) > mtime(test): print "building %s because %s is more recent" % (self.outnm, fnm) return 1 return 0 def assemble(self): print "building BUNDLE", os.path.basename(self.out) if os.path.exists(self.name): shutil.rmtree(self.name) # Create a minimal Mac bundle structure os.makedirs(self.name) os.makedirs(os.path.join(self.name, "Contents")) os.makedirs(os.path.join(self.name, "Contents", "MacOS")) os.makedirs(os.path.join(self.name, "Contents", "Resources")) os.makedirs(os.path.join(self.name, "Contents", "Frameworks")) # Key/values for a minimal Info.plist file info_plist_dict = {"CFBundleDisplayName": self.appname, "CFBundleName": self.appname, "CFBundleExecutable": os.path.basename(self.exename), "CFBundleIconFile": "App.icns", "CFBundleInfoDictionaryVersion": "6.0", "CFBundlePackageType": "APPL", "CFBundleShortVersionString": self.version, # Setting this to 1 will cause Mac OS X *not* to show # a dock icon for the PyInstaller process which # decompresses the real executable's contents - # actually, it's not clear why the real executable # gets instead an icon doing so. "LSBackgroundOnly": "1", } info_plist = """<?xml version="1.0" encoding="UTF-8"?> <!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd"> <plist version="1.0"> <dict>""" for k, v in info_plist_dict.items(): info_plist += "<key>%s</key>\n<string>%s</string>\n" % (k, v) info_plist += """</dict> </plist>""" f = open(os.path.join(self.name, "Contents", "Info.plist"), "w") f.write(info_plist) f.close() for inm, fnm, typ in self.toc: tofnm = os.path.join(self.name, "Contents", "MacOS", inm) todir = os.path.dirname(tofnm) if not os.path.exists(todir): os.makedirs(todir) shutil.copy2(fnm, tofnm) return 1 class TOC(UserList.UserList): def __init__(self, initlist=None): UserList.UserList.__init__(self) self.fltr = {} if initlist: for tpl in initlist: self.append(tpl) def append(self, tpl): try: fn = tpl[0] if tpl[2] == "BINARY": # Normalize the case for binary files only (to avoid duplicates # for different cases under Windows). We can't do that for # Python files because the import semantic (even at runtime) # depends on the case. 
fn = os.path.normcase(fn) if not self.fltr.get(fn): self.data.append(tpl) self.fltr[fn] = 1 except TypeError: print "TOC found a %s, not a tuple" % tpl raise def insert(self, pos, tpl): fn = tpl[0] if tpl[2] == "BINARY": fn = os.path.normcase(fn) if not self.fltr.get(fn): self.data.insert(pos, tpl) self.fltr[fn] = 1 def __add__(self, other): rslt = TOC(self.data) rslt.extend(other) return rslt def __radd__(self, other): rslt = TOC(other) rslt.extend(self.data) return rslt def extend(self, other): for tpl in other: self.append(tpl) def __sub__(self, other): fd = self.fltr.copy() # remove from fd if it's in other for tpl in other: if fd.get(tpl[0],0): del fd[tpl[0]] rslt = TOC() # return only those things still in fd (preserve order) for tpl in self.data: if fd.get(tpl[0],0): rslt.append(tpl) return rslt def __rsub__(self, other): rslt = TOC(other) return rslt.__sub__(self) def intersect(self, other): rslt = TOC() for tpl in other: if self.fltr.get(tpl[0],0): rslt.append(tpl) return rslt class Tree(Target, TOC): def __init__(self, root=None, prefix=None, excludes=None): Target.__init__(self) TOC.__init__(self) self.root = root self.prefix = prefix self.excludes = excludes if excludes is None: self.excludes = [] self.__postinit__() GUTS = (('root', _check_guts_eq), ('prefix', _check_guts_eq), ('excludes', _check_guts_eq), ('toc', None), ) def check_guts(self, last_build): data = Target.get_guts(self, last_build) if not data: return True stack = [ data[0] ] # root toc = data[3] # toc while stack: d = stack.pop() if mtime(d) > last_build: print "building %s because directory %s changed" % (self.outnm, d) return True for nm in os.listdir(d): path = os.path.join(d, nm) if os.path.isdir(path): stack.append(path) self.data = toc return False def assemble(self): print "building Tree", os.path.basename(self.out) stack = [(self.root, self.prefix)] excludes = {} xexcludes = {} for nm in self.excludes: if nm[0] == '*': xexcludes[nm[1:]] = 1 else: excludes[nm] = 1 rslt = [] while stack: dir, prefix = stack.pop() for fnm in os.listdir(dir): if excludes.get(fnm, 0) == 0: ext = os.path.splitext(fnm)[1] if xexcludes.get(ext,0) == 0: fullfnm = os.path.join(dir, fnm) rfnm = prefix and os.path.join(prefix, fnm) or fnm if os.path.isdir(fullfnm): stack.append((fullfnm, rfnm)) else: rslt.append((rfnm, fullfnm, 'DATA')) self.data = rslt try: oldstuff = _load_data(self.out) except: oldstuff = None newstuff = (self.root, self.prefix, self.excludes, self.data) if oldstuff != newstuff: _save_data(self.out, newstuff) return 1 print self.out, "no change!" return 0 def TkTree(): tclroot = config['TCL_root'] tclnm = os.path.join('_MEI', os.path.basename(tclroot)) tkroot = config['TK_root'] tknm = os.path.join('_MEI', os.path.basename(tkroot)) tcltree = Tree(tclroot, tclnm, excludes=['demos','encoding','*.lib']) tktree = Tree(tkroot, tknm, excludes=['demos','encoding','*.lib']) return tcltree + tktree def TkPKG(): return PKG(TkTree(), name='tk.pkg') #--- def build(spec): global SPECPATH, BUILDPATH, WARNFILE, rthooks rthooks = _load_data(os.path.join(HOMEPATH, 'rthooks.dat')) SPECPATH, specnm = os.path.split(spec) specnm = os.path.splitext(specnm)[0] if SPECPATH == '': SPECPATH = os.getcwd() WARNFILE = os.path.join(SPECPATH, 'warn%s.txt' % specnm) BUILDPATH = os.path.join(SPECPATH, 'build', "pyi." 
+ config['target_platform'], specnm) if '-o' in sys.argv: bpath = sys.argv[sys.argv.index('-o')+1] if os.path.isabs(bpath): BUILDPATH = bpath else: BUILDPATH = os.path.join(SPECPATH, bpath) if not os.path.exists(BUILDPATH): os.makedirs(BUILDPATH) execfile(spec) def main(specfile, configfilename): global target_platform, target_iswin, config global icon, versionInfo try: config = _load_data(configfilename) except IOError: print "You must run Configure.py before building!" sys.exit(1) target_platform = config.get('target_platform', sys.platform) target_iswin = target_platform[:3] == 'win' if target_platform == sys.platform: # _not_ cross compiling if config['pythonVersion'] != sys.version: print "The current version of Python is not the same with which PyInstaller was configured." print "Please re-run Configure.py with this version." sys.exit(1) if config.setdefault('pythonDebug', None) != __debug__: print "python optimization flags changed: rerun Configure.py with the same [-O] option" print "Configure.py optimize=%s, Build.py optimize=%s" % (not config['pythonDebug'], not __debug__) sys.exit(1) if config['hasRsrcUpdate']: import icon, versionInfo if config['hasUPX']: setupUPXFlags() if not config['useELFEXE']: EXE.append_pkg = 0 build(specfile) if __name__ == '__main__': from pyi_optparse import OptionParser parser = OptionParser('%prog [options] specfile') parser.add_option('-C', '--configfile', default=os.path.join(HOMEPATH, 'config.dat'), help='Name of generated configfile (default: %default)') opts, args = parser.parse_args() if len(args) != 1: parser.error('Requires exactly one .spec-file') main(args[0], configfilename=opts.configfile)
gpl-2.0
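The Target classes above (PKG, EXE, COLLECT, BUNDLE, Tree) are consumed from a .spec file that build() runs via execfile(spec). A minimal sketch of such a spec follows; the Analysis and PYZ classes are assumed to be defined earlier in Build.py (they are not part of this excerpt), and the script name and options are hypothetical.

# myscript.spec -- hypothetical sketch; build() would run this via execfile(spec)
a = Analysis(['myscript.py'])        # assumed: Analysis is defined earlier in Build.py
pyz = PYZ(a.pure)                    # assumed: PYZ archives the pure-Python modules
exe = EXE(pyz, a.scripts,
          exclude_binaries=1,        # defer binaries to COLLECT below
          name='myscript.exe',
          debug=0, strip=0, upx=0, console=1)
coll = COLLECT(exe, a.binaries,
               strip=0, upx=0,
               name='dist_myscript')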
gdi2290/django
django/contrib/gis/gdal/tests/test_driver.py
335
1253
import unittest

from django.contrib.gis.gdal import HAS_GDAL

if HAS_GDAL:
    from django.contrib.gis.gdal import Driver, GDALException


valid_drivers = (
    # vector
    'ESRI Shapefile', 'MapInfo File', 'TIGER', 'S57', 'DGN', 'Memory', 'CSV',
    'GML', 'KML',
    # raster
    'GTiff', 'JPEG', 'MEM', 'PNG',
)

invalid_drivers = ('Foo baz', 'clucka', 'ESRI Shp', 'ESRI rast')

aliases = {
    'eSrI': 'ESRI Shapefile',
    'TigER/linE': 'TIGER',
    'SHAPE': 'ESRI Shapefile',
    'sHp': 'ESRI Shapefile',
    'tiFf': 'GTiff',
    'tIf': 'GTiff',
    'jPEg': 'JPEG',
    'jpG': 'JPEG',
}


@unittest.skipUnless(HAS_GDAL, "GDAL is required")
class DriverTest(unittest.TestCase):

    def test01_valid_driver(self):
        "Testing valid GDAL/OGR Data Source Drivers."
        for d in valid_drivers:
            dr = Driver(d)
            self.assertEqual(d, str(dr))

    def test02_invalid_driver(self):
        "Testing invalid GDAL/OGR Data Source Drivers."
        for i in invalid_drivers:
            self.assertRaises(GDALException, Driver, i)

    def test03_aliases(self):
        "Testing driver aliases."
        for alias, full_name in aliases.items():
            dr = Driver(alias)
            self.assertEqual(full_name, str(dr))
bsd-3-clause
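As a quick illustration of the alias handling exercised by test03_aliases: constructing a Driver from any of the case-insensitive aliases in the table yields the canonical driver name. This sketch assumes a working GDAL installation, just as the tests do.

from django.contrib.gis.gdal import Driver

dr = Driver('sHp')       # an alias from the table above
print(str(dr))           # 'ESRI Shapefile'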
neumerance/cloudloon2
.venv/lib/python2.7/site-packages/django/forms/extras/widgets.py
110
5251
""" Extra HTML Widget classes """ from __future__ import unicode_literals import datetime import re from django.forms.widgets import Widget, Select from django.utils import datetime_safe from django.utils.dates import MONTHS from django.utils.safestring import mark_safe from django.utils.formats import get_format from django.utils import six from django.conf import settings __all__ = ('SelectDateWidget',) RE_DATE = re.compile(r'(\d{4})-(\d\d?)-(\d\d?)$') def _parse_date_fmt(): fmt = get_format('DATE_FORMAT') escaped = False output = [] for char in fmt: if escaped: escaped = False elif char == '\\': escaped = True elif char in 'Yy': output.append('year') #if not self.first_select: self.first_select = 'year' elif char in 'bEFMmNn': output.append('month') #if not self.first_select: self.first_select = 'month' elif char in 'dj': output.append('day') #if not self.first_select: self.first_select = 'day' return output class SelectDateWidget(Widget): """ A Widget that splits date input into three <select> boxes. This also serves as an example of a Widget that has more than one HTML element and hence implements value_from_datadict. """ none_value = (0, '---') month_field = '%s_month' day_field = '%s_day' year_field = '%s_year' def __init__(self, attrs=None, years=None, required=True): # years is an optional list/tuple of years to use in the "year" select box. self.attrs = attrs or {} self.required = required if years: self.years = years else: this_year = datetime.date.today().year self.years = range(this_year, this_year+10) def render(self, name, value, attrs=None): try: year_val, month_val, day_val = value.year, value.month, value.day except AttributeError: year_val = month_val = day_val = None if isinstance(value, six.string_types): if settings.USE_L10N: try: input_format = get_format('DATE_INPUT_FORMATS')[0] v = datetime.datetime.strptime(value, input_format) year_val, month_val, day_val = v.year, v.month, v.day except ValueError: pass else: match = RE_DATE.match(value) if match: year_val, month_val, day_val = [int(v) for v in match.groups()] choices = [(i, i) for i in self.years] year_html = self.create_select(name, self.year_field, value, year_val, choices) choices = list(six.iteritems(MONTHS)) month_html = self.create_select(name, self.month_field, value, month_val, choices) choices = [(i, i) for i in range(1, 32)] day_html = self.create_select(name, self.day_field, value, day_val, choices) output = [] for field in _parse_date_fmt(): if field == 'year': output.append(year_html) elif field == 'month': output.append(month_html) elif field == 'day': output.append(day_html) return mark_safe('\n'.join(output)) def id_for_label(self, id_): first_select = None field_list = _parse_date_fmt() if field_list: first_select = field_list[0] if first_select is not None: return '%s_%s' % (id_, first_select) else: return '%s_month' % id_ def value_from_datadict(self, data, files, name): y = data.get(self.year_field % name) m = data.get(self.month_field % name) d = data.get(self.day_field % name) if y == m == d == "0": return None if y and m and d: if settings.USE_L10N: input_format = get_format('DATE_INPUT_FORMATS')[0] try: date_value = datetime.date(int(y), int(m), int(d)) except ValueError: return '%s-%s-%s' % (y, m, d) else: date_value = datetime_safe.new_date(date_value) return date_value.strftime(input_format) else: return '%s-%s-%s' % (y, m, d) return data.get(name, None) def create_select(self, name, field, value, val, choices): if 'id' in self.attrs: id_ = self.attrs['id'] else: id_ = 'id_%s' % name 
if not (self.required and val): choices.insert(0, self.none_value) local_attrs = self.build_attrs(id=field % id_) s = Select(choices=choices) select_html = s.render(field % name, val, local_attrs) return select_html def _has_changed(self, initial, data): try: input_format = get_format('DATE_INPUT_FORMATS')[0] data = datetime_safe.datetime.strptime(data, input_format).date() except (TypeError, ValueError): pass return super(SelectDateWidget, self)._has_changed(initial, data)
apache-2.0
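A minimal usage sketch for SelectDateWidget; the form and field names are hypothetical, and a configured Django settings module is assumed. Note how the posted keys follow the '%s_year'/'%s_month'/'%s_day' patterns that value_from_datadict() reads back.

from django import forms
from django.forms.extras.widgets import SelectDateWidget

class BirthdayForm(forms.Form):
    birthday = forms.DateField(widget=SelectDateWidget(years=range(1990, 2000)))

# The widget renders three <select> boxes; on POST it reassembles the date.
form = BirthdayForm({'birthday_year': '1994',
                     'birthday_month': '4',
                     'birthday_day': '1'})
print(form.is_valid())   # True: the three selects combine into 1994-04-01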
ismailsunni/healthsites
django_project/localities/tests/test_model_DomainArchive.py
2
1095
# -*- coding: utf-8 -*-
from django.test import TestCase

from .model_factories import DomainF
from ..models import DomainArchive


class TestModelDomainArchive(TestCase):
    def test_domainArchive_fields(self):
        self.assertListEqual(
            [fld.name for fld in DomainArchive._meta.fields], [
                u'id', 'changeset', 'version', 'content_type',
                'object_id', 'name', 'description', 'template_fragment'
            ]
        )

    def test_archiving(self):
        domain = DomainF.create(name='A domain')

        domain.name = 'test'
        domain.save()

        domain.description = 'a description'
        domain.save()

        # test save with no changes, should not trigger model archival
        domain.save()

        self.assertEqual(DomainArchive.objects.count(), 3)

        self.assertListEqual(
            [dom.name for dom in DomainArchive.objects.all()],
            ['A domain', 'test', 'test']
        )

        self.assertListEqual(
            [dom.version for dom in DomainArchive.objects.all()],
            [1, 2, 3]
        )
bsd-2-clause
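The test pins down an archive-on-change contract: each save that alters tracked fields appends exactly one archive row, and a no-op save appends nothing. A plain-Python sketch of that contract follows; it is not the project's actual implementation (which lives in models not shown here), just the behavior the assertions encode.

def save_with_archive(obj, archive_rows, tracked=('name', 'description')):
    # Snapshot only when a tracked field actually changed since the last save.
    snapshot = dict((f, getattr(obj, f)) for f in tracked)
    if archive_rows and archive_rows[-1] == snapshot:
        return                     # unchanged: no new archive row
    archive_rows.append(snapshot)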
ltiao/networkx
networkx/algorithms/tests/test_distance_measures.py
6
2145
#!/usr/bin/env python
from nose.tools import *
import networkx


class TestDistance:

    def setUp(self):
        G=networkx.Graph()
        from networkx import convert_node_labels_to_integers as cnlti
        G=cnlti(networkx.grid_2d_graph(4,4),first_label=1,ordering="sorted")
        self.G=G

    def test_eccentricity(self):
        assert_equal(networkx.eccentricity(self.G,1),6)
        e=networkx.eccentricity(self.G)
        assert_equal(e[1],6)
        sp = dict(networkx.shortest_path_length(self.G))
        e=networkx.eccentricity(self.G,sp=sp)
        assert_equal(e[1],6)
        e=networkx.eccentricity(self.G,v=1)
        assert_equal(e,6)
        e=networkx.eccentricity(self.G,v=[1,1]) #This behavior changed in version 1.8 (ticket #739)
        assert_equal(e[1],6)
        e=networkx.eccentricity(self.G,v=[1,2])
        assert_equal(e[1],6)
        # test against graph with one node
        G=networkx.path_graph(1)
        e=networkx.eccentricity(G)
        assert_equal(e[0],0)
        e=networkx.eccentricity(G,v=0)
        assert_equal(e,0)
        assert_raises(networkx.NetworkXError, networkx.eccentricity, G, 1)
        # test against empty graph
        G=networkx.empty_graph()
        e=networkx.eccentricity(G)
        assert_equal(e,{})

    def test_diameter(self):
        assert_equal(networkx.diameter(self.G),6)

    def test_radius(self):
        assert_equal(networkx.radius(self.G),4)

    def test_periphery(self):
        assert_equal(set(networkx.periphery(self.G)),set([1, 4, 13, 16]))

    def test_center(self):
        assert_equal(set(networkx.center(self.G)),set([6, 7, 10, 11]))

    def test_radius_exception(self):
        G=networkx.Graph()
        G.add_edge(1,2)
        G.add_edge(3,4)
        assert_raises(networkx.NetworkXError, networkx.diameter, G)

    @raises(networkx.NetworkXError)
    def test_eccentricity_infinite(self):
        G=networkx.Graph([(1,2),(3,4)])
        e = networkx.eccentricity(G)

    @raises(networkx.NetworkXError)
    def test_eccentricity_invalid(self):
        G=networkx.Graph([(1,2),(3,4)])
        e = networkx.eccentricity(G,sp=1)
bsd-3-clause
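For reference, here are the quantities these tests cover, evaluated on a small concrete graph (a 5-node path) where the answers are easy to verify by hand.

import networkx

G = networkx.path_graph(5)              # 0-1-2-3-4
print(networkx.eccentricity(G, v=0))    # 4: distance to the farthest node
print(networkx.diameter(G))             # 4: maximum eccentricity
print(networkx.radius(G))               # 2: minimum eccentricity
print(networkx.center(G))               # [2]: nodes whose eccentricity equals the radius
print(networkx.periphery(G))            # [0, 4]: nodes whose eccentricity equals the diameter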
idea4bsd/idea4bsd
python/helpers/py2only/docutils/languages/cs.py
148
1928
# $Id: cs.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: Marek Blaha <[email protected]>
# Copyright: This module has been placed in the public domain.

# New language mappings are welcome.  Before doing a new translation, please
# read <http://docutils.sf.net/docs/howto/i18n.html>.  Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.

"""
Czech-language mappings for language-dependent features of Docutils.
"""

__docformat__ = 'reStructuredText'

labels = {
      # fixed: language-dependent
      'author': u'Autor',
      'authors': u'Auto\u0159i',
      'organization': u'Organizace',
      'address': u'Adresa',
      'contact': u'Kontakt',
      'version': u'Verze',
      'revision': u'Revize',
      'status': u'Stav',
      'date': u'Datum',
      'copyright': u'Copyright',
      'dedication': u'V\u011Bnov\u00E1n\u00ED',
      'abstract': u'Abstrakt',
      'attention': u'Pozor!',
      'caution': u'Opatrn\u011B!',
      'danger': u'!NEBEZPE\u010C\u00CD!',
      'error': u'Chyba',
      'hint': u'Rada',
      'important': u'D\u016Fle\u017Eit\u00E9',
      'note': u'Pozn\u00E1mka',
      'tip': u'Tip',
      'warning': u'Varov\u00E1n\u00ED',
      'contents': u'Obsah'}
"""Mapping of node class name to label text."""

bibliographic_fields = {
      # language-dependent: fixed
      u'autor': 'author',
      u'auto\u0159i': 'authors',
      u'organizace': 'organization',
      u'adresa': 'address',
      u'kontakt': 'contact',
      u'verze': 'version',
      u'revize': 'revision',
      u'stav': 'status',
      u'datum': 'date',
      u'copyright': 'copyright',
      u'v\u011Bnov\u00E1n\u00ED': 'dedication',
      u'abstrakt': 'abstract'}
"""Czech (lowcased) to canonical name mapping for bibliographic fields."""

author_separators = [';', ',']
"""List of separator strings for the 'Authors' bibliographic field. Tried in
order."""
apache-2.0
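A short sketch of how these mappings are consumed: importing the language module gives direct access to the label and bibliographic-field tables that Docutils looks up by language code.

from docutils.languages import cs

print(cs.labels['warning'])               # u'Varov\u00e1n\u00ed'
print(cs.bibliographic_fields[u'autor'])  # 'author'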
cigroup-ol/metaopt
metaopt/tests/integration/optimizer/singleinvoke.py
1
1233
# -*- coding: utf-8 -*-
"""
Tests for the single invoke invoker.
"""

# Future
from __future__ import absolute_import, division, print_function, \
    unicode_literals, with_statement

# Third Party
import nose
from mock import Mock

# First Party
from metaopt.concurrent.invoker.singleprocess import SingleProcessInvoker
from metaopt.core.arg.util.creator import ArgsCreator
from metaopt.core.paramspec.util import param
from metaopt.core.returnspec.util.wrapper import ReturnValuesWrapper
from metaopt.optimizer.singleinvoke import SingleInvokeOptimizer


@param.int("a", interval=(2, 2))
@param.int("b", interval=(1, 1))
def f(a, b):
    return -(a + b)


def test_optimize_returns_result():
    optimizer = SingleInvokeOptimizer()
    optimizer.on_result = Mock()
    optimizer.on_error = Mock()

    invoker = SingleProcessInvoker()
    invoker.f = f

    optimizer.optimize(invoker=invoker, param_spec=f.param_spec,
                       return_spec=None)

    args = ArgsCreator(f.param_spec).args()

    assert not optimizer.on_error.called
    optimizer.on_result.assert_called_with(value=ReturnValuesWrapper(None, -3),
                                           fargs=args)

if __name__ == '__main__':
    nose.runmodule()
bsd-3-clause
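Why the expected value is -3: both @param.int intervals above are pinned to a single point, so ArgsCreator can only produce the invocation a=2, b=1, and f returns -(2 + 1).

def f(a, b):
    return -(a + b)

print(f(2, 1))   # -3, the value wrapped in ReturnValuesWrapper above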
SuperJohn/scikit-class
grid_search.py
6
1243
import pandas as pd
import numpy as np

df = pd.read_csv('tweets.csv')
target = df['is_there_an_emotion_directed_at_a_brand_or_product']
text = df['tweet_text']

fixed_text = text[pd.notnull(text)]
fixed_target = target[pd.notnull(text)]

from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import Pipeline
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2

p = Pipeline(steps=[('counts', CountVectorizer()),
                    ('feature_selection', SelectKBest(chi2)),
                    ('multinomialnb', MultinomialNB())])

from sklearn.grid_search import GridSearchCV

parameters = {
    'counts__max_df': (0.5, 0.75, 1.0),
    'counts__min_df': (1, 2, 3),
    'counts__ngram_range': ((1,1), (1,2)),
#    'feature_selection__k': (1000, 10000, 100000)
}

grid_search = GridSearchCV(p, parameters, n_jobs=1, verbose=1, cv=10)

grid_search.fit(fixed_text, fixed_target)

print("Best score: %0.3f" % grid_search.best_score_)
print("Best parameters set:")
best_parameters = grid_search.best_estimator_.get_params()
for param_name in sorted(parameters.keys()):
    print("\t%s: %r" % (param_name, best_parameters[param_name]))
gpl-2.0
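The parameters dict relies on scikit-learn's '<step>__<param>' naming convention to reach inside the pipeline. A self-contained sketch of the same idea on toy data follows; it uses the modern sklearn.model_selection import path, since the sklearn.grid_search module used above was deprecated and later removed.

from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV

p = Pipeline(steps=[('counts', CountVectorizer()),
                    ('multinomialnb', MultinomialNB())])
parameters = {'counts__ngram_range': ((1, 1), (1, 2))}  # '<step>__<param>' keys
texts = ['good movie', 'bad movie', 'great film', 'awful film']
labels = [1, 0, 1, 0]

grid_search = GridSearchCV(p, parameters, cv=2)
grid_search.fit(texts, labels)
print(grid_search.best_params_)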
fnouama/intellij-community
python/helpers/pydev/pydev_monkey_qt.py
47
5183
from __future__ import nested_scopes def set_trace_in_qt(): import pydevd_tracing from pydevd_comm import GetGlobalDebugger debugger = GetGlobalDebugger() if debugger is not None: pydevd_tracing.SetTrace(debugger.trace_dispatch) _patched_qt = False def patch_qt(): ''' This method patches qt (PySide, PyQt4, PyQt5) so that we have hooks to set the tracing for QThread. ''' # Avoid patching more than once global _patched_qt if _patched_qt: return _patched_qt = True # Ok, we have an issue here: # PyDev-452: Selecting PyQT API version using sip.setapi fails in debug mode # http://pyqt.sourceforge.net/Docs/PyQt4/incompatible_apis.html # Mostly, if the user uses a different API version (i.e.: v2 instead of v1), # that has to be done before importing PyQt4/5 modules (PySide doesn't have this issue # as it only implements v2). patch_qt_on_import = None try: import PySide except: try: import PyQt4 patch_qt_on_import = 'PyQt4' except: try: import PyQt5 patch_qt_on_import = 'PyQt5' except: return if patch_qt_on_import: _patch_import_to_patch_pyqt_on_import(patch_qt_on_import) else: _internal_patch_qt() def _patch_import_to_patch_pyqt_on_import(patch_qt_on_import): # I don't like this approach very much as we have to patch __import__, but I like even less # asking the user to configure something in the client side... # So, our approach is to patch PyQt4/5 right before the user tries to import it (at which # point he should've set the sip api version properly already anyways). dotted = patch_qt_on_import + '.' original_import = __import__ from _pydev_imps._pydev_sys_patch import patch_sys_module, patch_reload, cancel_patches_in_sys_module patch_sys_module() patch_reload() def patched_import(name, *args, **kwargs): if patch_qt_on_import == name or name.startswith(dotted): builtins.__import__ = original_import cancel_patches_in_sys_module() _internal_patch_qt() # Patch it only when the user would import the qt module return original_import(name, *args, **kwargs) try: import builtins except ImportError: import __builtin__ as builtins builtins.__import__ = patched_import def _internal_patch_qt(): try: from PySide import QtCore except: try: from PyQt4 import QtCore except: try: from PyQt5 import QtCore except: return _original_thread_init = QtCore.QThread.__init__ _original_runnable_init = QtCore.QRunnable.__init__ _original_QThread = QtCore.QThread class FuncWrapper: def __init__(self, original): self._original = original def __call__(self, *args, **kwargs): set_trace_in_qt() return self._original(*args, **kwargs) class StartedSignalWrapper: # Wrapper for the QThread.started signal def __init__(self, thread, original_started): self.thread = thread self.original_started = original_started def connect(self, func, *args, **kwargs): return self.original_started.connect(FuncWrapper(func), *args, **kwargs) def disconnect(self, *args, **kwargs): return self.original_started.disconnect(*args, **kwargs) def emit(self, *args, **kwargs): return self.original_started.emit(*args, **kwargs) class ThreadWrapper(QtCore.QThread): # Wrapper for QThread def __init__(self, *args, **kwargs): _original_thread_init(self, *args, **kwargs) # In PyQt5 the program hangs when we try to call original run method of QThread class. # So we need to distinguish instances of QThread class and instances of QThread inheritors. 
if self.__class__.run == _original_QThread.run: self.run = self._exec_run else: self._original_run = self.run self.run = self._new_run self._original_started = self.started self.started = StartedSignalWrapper(self, self.started) def _exec_run(self): set_trace_in_qt() return self.exec_() def _new_run(self): set_trace_in_qt() return self._original_run() class RunnableWrapper(QtCore.QRunnable): # Wrapper for QRunnable def __init__(self, *args, **kwargs): _original_runnable_init(self, *args, **kwargs) self._original_run = self.run self.run = self._new_run def _new_run(self): set_trace_in_qt() return self._original_run() QtCore.QThread = ThreadWrapper QtCore.QRunnable = RunnableWrapper
apache-2.0
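The core trick above is generic: keep a reference to the original __init__ and run, then shadow run with an instance attribute that installs tracing inside the new thread before delegating to user code. A standalone sketch of the same pattern on threading.Thread (not Qt, and with a no-op tracer standing in for the debugger's):

import sys
import threading

_original_init = threading.Thread.__init__

class TracingThread(threading.Thread):
    def __init__(self, *args, **kwargs):
        _original_init(self, *args, **kwargs)
        self._original_run = self.run
        self.run = self._traced_run        # instance attribute shadows the method

    def _traced_run(self):
        sys.settrace(lambda frame, event, arg: None)  # stand-in for a real tracer
        return self._original_run()

t = TracingThread(target=lambda: None)
t.start()
t.join()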
donnerluetjen/ardupilot
Tools/LogAnalyzer/tests/TestAutotune.py
265
4748
from LogAnalyzer import Test,TestResult
import DataflashLog

# from ArduCopter/defines.h
AUTOTUNE_INITIALISED = 30
AUTOTUNE_OFF = 31
AUTOTUNE_RESTART = 32
AUTOTUNE_SUCCESS = 33
AUTOTUNE_FAILED = 34
AUTOTUNE_REACHED_LIMIT = 35
AUTOTUNE_PILOT_TESTING = 36
AUTOTUNE_SAVEDGAINS = 37

AUTOTUNE_EVENTS = frozenset([AUTOTUNE_INITIALISED,
                             AUTOTUNE_OFF,
                             AUTOTUNE_RESTART,
                             AUTOTUNE_SUCCESS,
                             AUTOTUNE_FAILED,
                             AUTOTUNE_REACHED_LIMIT,
                             AUTOTUNE_PILOT_TESTING,
                             AUTOTUNE_SAVEDGAINS])

class TestAutotune(Test):
    '''test for autotune success (copter only)'''

    class AutotuneSession(object):
        def __init__(self, events):
            self.events = events
        @property
        def linestart(self):
            return self.events[0][0]
        @property
        def linestop(self):
            return self.events[-1][0]
        @property
        def success(self):
            return AUTOTUNE_SUCCESS in [i for _,i in self.events]
        @property
        def failure(self):
            return AUTOTUNE_FAILED in [i for _,i in self.events]
        @property
        def limit(self):
            return AUTOTUNE_REACHED_LIMIT in [i for _,i in self.events]
        def __repr__(self):
            return "<AutotuneSession {}-{}>".format(self.linestart,self.linestop)

    def __init__(self):
        Test.__init__(self)
        self.name = "Autotune"

    def run(self, logdata, verbose):
        self.result = TestResult()
        self.result.status = TestResult.StatusType.GOOD

        if logdata.vehicleType != "ArduCopter":
            self.result.status = TestResult.StatusType.NA
            return

        for i in ['EV','ATDE','ATUN']:
            r = False
            if not i in logdata.channels:
                self.result.status = TestResult.StatusType.UNKNOWN
                self.result.statusMessage = "No {} log data".format(i)
                r = True
            if r:
                return

        events = filter(lambda x: x[1] in AUTOTUNE_EVENTS, logdata.channels["EV"]["Id"].listData)
        attempts = []

        j = None
        for i in range(0,len(events)):
            line,ev = events[i]
            if ev == AUTOTUNE_INITIALISED:
                if j is not None:
                    attempts.append(TestAutotune.AutotuneSession(events[j:i]))
                j = i

        # last attempt
        if j is not None:
            attempts.append(TestAutotune.AutotuneSession(events[j:]))

        for a in attempts:
            # this should not be necessary!
            def class_from_channel(c):
                members = dict({'__init__':lambda x: setattr(x,i,None) for i in logdata.channels[c]})
                cls = type(\
                    'Channel__{:s}'.format(c),
                    (object,),
                    members
                )
                return cls

            # last wins
            if a.success:
                self.result.status = TestResult.StatusType.GOOD
                s = "[+]"
            elif a.failure:
                self.result.status = TestResult.StatusType.FAIL
                s = "[-]"
            else:
                self.result.status = TestResult.StatusType.UNKNOWN
                s = "[?]"

            s += " Autotune {}-{}\n".format(a.linestart,a.linestop)
            self.result.statusMessage += s

            if verbose:
                linenext = a.linestart + 1
                while linenext < a.linestop:
                    try:
                        line = logdata.channels['ATUN']['RateMax'].getNearestValueFwd(linenext)[1]
                        if line > a.linestop:
                            break
                    except:
                        break
                    atun = class_from_channel('ATUN')()
                    for key in logdata.channels['ATUN']:
                        setattr(atun, key, logdata.channels['ATUN'][key].getNearestValueFwd(linenext)[0])
                        linenext = logdata.channels['ATUN'][key].getNearestValueFwd(linenext)[1] + 1
                    self.result.statusMessage += 'ATUN Axis:{atun.Axis} TuneStep:{atun.TuneStep} RateMin:{atun.RateMin:5.0f} RateMax:{atun.RateMax:5.0f} RPGain:{atun.RPGain:1.4f} RDGain:{atun.RDGain:1.4f} SPGain:{atun.SPGain:1.1f} (@line:{l})\n'.format(l=linenext,s=s, atun=atun)
                self.result.statusMessage += '\n'
gpl-3.0
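The session-splitting loop in run() is worth seeing in isolation: a new AutotuneSession begins at every INITIALISED marker, and the tail after the last marker forms the final attempt. A minimal standalone version with made-up event data:

AUTOTUNE_INITIALISED = 30

events = [(10, 30), (55, 33), (80, 30), (120, 34)]   # (line, event id) pairs
attempts, j = [], None
for i, (line, ev) in enumerate(events):
    if ev == AUTOTUNE_INITIALISED:
        if j is not None:
            attempts.append(events[j:i])
        j = i
if j is not None:
    attempts.append(events[j:])          # last attempt
print(attempts)   # [[(10, 30), (55, 33)], [(80, 30), (120, 34)]]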
radiasoft/radtrack
experimental/hermite/testHermite02.py
1
6919
# # Test executable #2 to exercise the Gauss-Hermite class # Here, we fit a Gauss-Hermite expansion to an arbitrary profile. # The SciPy least squares method is used. # # Copyright (c) 2013 RadiaBeam Technologies. All rights reserved # # python imports import math # SciPy imports import numpy as np import matplotlib.pyplot as plt # RadiaBeam imports from radtrack.fields import RbGaussHermiteMN # SciPy imports import numpy as np import matplotlib.pyplot as plt from scipy.optimize import leastsq # --------------------------------------------------------- # Make sure the residual() method has access to necessary # 'global' data: global mMax, nMax, numFuncCalls, hermiteSeries # Specify the central laser wavelength lambda0 = 10.e-06 # Need a place holder for the waist size w0 = 10.*lambda0 # Define the maximum order(s) of the Hermite expansion mMax = 0 # horizontal nMax = 0 # vertical # Create an instance of the Hermite expansion class hermiteSeries = RbGaussHermiteMN.RbGaussHermiteMN(lambda0,w0,w0,0.) # Specify the desired grid size numX = 50 numY = 50 nCells = numX * numY # load up the x,y locations of the mesh xMin = -4.*w0 xMax = 4.*w0 yMin = xMin yMax = xMax xArr = np.zeros(numX) for iLoop in range(numX): xArr[iLoop] = xMin + iLoop * (xMax-xMin) / (numX-1) yArr = np.zeros(numY) for jLoop in range(numY): yArr[jLoop] = yMin + jLoop * (yMax-yMin) / (numY-1) xGrid = np.zeros((numX, numY)) yGrid = np.zeros((numX, numY)) for iLoop in range(numX): for jLoop in range(numY): xGrid[iLoop,jLoop] = xMin + iLoop * (xMax-xMin) / (numX-1) yGrid[iLoop,jLoop] = yMin + jLoop * (yMax-yMin) / (numY-1) # Create transverse field profile (#1 simple Gaussian) ExGrid = np.zeros((numX, numY)) exMax = 1.0e+09 # this gets scaled out before plotting/fitting phi1 = math.pi/17.5 xs1 = 1.07 * w0 ys1 = -0.98 * w0 waistx = 0.9 * w0 waisty = 1.8 * w0 maxValue = 0. for iLoop in range(numX): for jLoop in range(numY): xArg = (xArr[iLoop]-xs1)*math.cos(phi1) + (yArr[jLoop]-ys1)*math.sin(phi1) yArg = -(xArr[iLoop]-xs1)*math.sin(phi1) + (yArr[jLoop]-ys1)*math.cos(phi1) ExGrid[iLoop, jLoop] = exMax*math.exp(-(xArg/waistx)**2)*math.exp(-(yArg/waisty)**2) maxValue = max(ExGrid[iLoop, jLoop], maxValue) # Divide out the maximum value ExGrid /= maxValue # Calculate residuals for the least squares analysis # params - array of fitting parameters numFuncCalls = 0 def residuals(params, e, x, y): global mMax, nMax, numFuncCalls, hermiteSeries hermiteSeries.setWaistX(params[0]) hermiteSeries.setWaistY(params[1]) hermiteSeries.setWRotAngle(params[2]) hermiteSeries.setXShift(params[3]) hermiteSeries.setYShift(params[4]) hermiteSeries.setMCoef(params[5:mMax+6]) hermiteSeries.setNCoef(params[mMax+6:mMax+nMax+7]) # let the user know what's going on if many function calls are required if numFuncCalls == 0: print ' ' print 'Number of calls to method residual():' numFuncCalls += 1 if 10*int(numFuncCalls/10.) == numFuncCalls: print ' ', numFuncCalls return e-hermiteSeries.evaluateEx(x,y,0.,0.) 
# plot the transverse field profile ncLevels = 12 vLevels = [0.001, 0.05, 0.15, 0.25, 0.35, 0.45, 0.55, 0.65, 0.75, 0.85, 0.95, 1.05] plt.figure(1) cs1 = plt.contourf(xGrid, yGrid, ExGrid, vLevels) plt.colorbar(cs1) plt.axis([xMin, xMax, yMin, yMax]) plt.xlabel('x [m]') plt.ylabel('y [m]') plt.title('x-section #1: Gaussian w/ slight asymmetry & rotation') # choose initial guesses for all fitting parameters # also, specify the scale of variations for each paramGuess = np.zeros(mMax+nMax+7) paramGuess[0] = 1.2*w0 # horizontal waist paramGuess[1] = 0.9*w0 # vertical waist paramGuess[2] = 0.0 # rotation angle paramGuess[3] = 0.0 # horizontal shift paramGuess[4] = 0.0 # vertical shift paramGuess[5] = 1.0 # 0th horiz. coeff for iLoop in range(6,mMax+6): paramGuess[iLoop] = 0.0 # other horiz. coeff's paramGuess[mMax+6] = 1.0 # 0th vertical coeff for iLoop in range(mMax+7,mMax+nMax+7): paramGuess[iLoop] = 0.0 # other vertical coeff's # invoke the least squares algorithm result = leastsq(residuals, paramGuess, \ args=(np.reshape(ExGrid,nCells), \ np.reshape(xGrid,nCells), \ np.reshape(yGrid,nCells)), \ full_output=True, ftol=1e-6, \ maxfev=200) parFit = result[0] nEvals = result[2]['nfev'] resVals = result[2]['fvec'] message = result[3] iError = result[4] print ' ' print ' iError = ', iError print ' message = ', message print ' nEvals = ', nEvals print ' resVals = ', resVals # load the results into named variables (for clarity) wxFit = parFit[0] wyFit = parFit[1] tmpPhi = parFit[2] phiFit = tmpPhi - 2.*math.pi*int(0.5*tmpPhi/math.pi) if phiFit > 2.*math.pi: phiFit -= 2.*math.pi if phiFit < 0.: phiFit += 2.*math.pi xsFit = parFit[3] ysFit = parFit[4] mCFit = np.zeros(mMax+1) mCFit[0:mMax+1] = parFit[5:mMax+6] nCFit = np.zeros(nMax+1) nCFit[0:nMax+1] = parFit[mMax+6:mMax+nMax+7] # check the results print ' ' print 'The least squares minimimization has completed:' print ' wx = ', waistx, '; ', wxFit print ' wy = ', waisty, '; ', wyFit print ' phi = ', phi1, '; ', phiFit print ' xS = ', xs1, '; ', xsFit print ' yS = ', ys1, '; ', ysFit print ' C0x * C0y = 1.0; ', mCFit[0]*nCFit[0] # print ' C1x = 0.0 ; ', mCFit[1] # print ' C2x = 0.0 ; ', mCFit[2] # print ' C3x = 0.0 ; ', mCFit[3] # print ' C4x = 0.0 ; ', mCFit[4] # print ' C1y = 0.0 ; ', nCFit[1] # print ' C2y = 0.0 ; ', nCFit[2] # print ' C3y = 0.0 ; ', nCFit[3] # print ' C4y = 0.0 ; ', nCFit[4] # load up the fitted electric field at all grid points hermiteSeries.setWaistX(wxFit) hermiteSeries.setWaistY(wyFit) hermiteSeries.setWRotAngle(phiFit) hermiteSeries.setXShift(xsFit) hermiteSeries.setYShift(ysFit) hermiteSeries.setMCoef(mCFit) hermiteSeries.setNCoef(nCFit) ExFit = np.reshape(hermiteSeries.evaluateEx( np.reshape(xGrid,nCells), \ np.reshape(yGrid,nCells), 0., 0.), \ (numX, numY)) # plot the fitted transverse field profile plt.figure(2) cs2 = plt.contourf(xGrid, yGrid, ExFit, vLevels) plt.colorbar(cs2) plt.axis([xMin, xMax, yMin, yMax]) plt.xlabel('x [m]') plt.ylabel('y [m]') plt.title('x-section #1: Result of the least squares fit') # plot the transverse profile of the difference plt.figure(3) cs3 = plt.contourf(xGrid, yGrid, ExFit-ExGrid, ncLevels) plt.colorbar(cs3) plt.axis([xMin, xMax, yMin, yMax]) plt.xlabel('x [m]') plt.ylabel('y [m]') plt.title('x-section #1: Absolute differences in Ex') plt.show()
apache-2.0
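The residuals() callback is the whole contract leastsq needs: return (data - model) as a flat array and it minimizes the sum of squares over the parameters. A compact 1-D version of the same workflow, with a known answer so convergence is easy to check:

import numpy as np
from scipy.optimize import leastsq

x = np.linspace(-3., 3., 101)
data = np.exp(-(x / 1.3) ** 2)            # synthetic profile with waist 1.3

def residuals(params, data, x):
    waist = params[0]
    return data - np.exp(-(x / waist) ** 2)

parFit, iError = leastsq(residuals, [1.0], args=(data, x))
print(parFit[0])                          # ~1.3, recovering the known waist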
OpenFacetracker/facetracker-core
lib/youtube-dl/youtube_dl/extractor/shared.py
31
1936
from __future__ import unicode_literals

import re
import base64

from .common import InfoExtractor
from ..compat import (
    compat_urllib_parse,
    compat_urllib_request,
)
from ..utils import (
    ExtractorError,
    int_or_none,
)


class SharedIE(InfoExtractor):
    _VALID_URL = r'http://shared\.sx/(?P<id>[\da-z]{10})'

    _TEST = {
        'url': 'http://shared.sx/0060718775',
        'md5': '106fefed92a8a2adb8c98e6a0652f49b',
        'info_dict': {
            'id': '0060718775',
            'ext': 'mp4',
            'title': 'Bmp4',
        },
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)

        webpage = self._download_webpage(url, video_id)

        if '>File does not exist<' in webpage:
            raise ExtractorError(
                'Video %s does not exist' % video_id, expected=True)

        download_form = dict(re.findall(
            r'<input type="hidden" name="([^"]+)" value="([^"]*)"', webpage))
        request = compat_urllib_request.Request(
            url, compat_urllib_parse.urlencode(download_form))
        request.add_header('Content-Type', 'application/x-www-form-urlencoded')

        video_page = self._download_webpage(
            request, video_id, 'Downloading video page')

        video_url = self._html_search_regex(
            r'data-url="([^"]+)"', video_page, 'video URL')
        title = base64.b64decode(self._html_search_meta(
            'full:title', webpage, 'title')).decode('utf-8')
        filesize = int_or_none(self._html_search_meta(
            'full:size', webpage, 'file size', fatal=False))
        thumbnail = self._html_search_regex(
            r'data-poster="([^"]+)"', video_page, 'thumbnail', default=None)

        return {
            'id': video_id,
            'url': video_url,
            'ext': 'mp4',
            'filesize': filesize,
            'title': title,
            'thumbnail': thumbnail,
        }
gpl-2.0
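The _VALID_URL pattern does double duty: it gates which URLs the extractor claims, and it supplies the named 'id' group that _match_id() returns. A quick check against the extractor's own test URL:

import re

_VALID_URL = r'http://shared\.sx/(?P<id>[\da-z]{10})'
m = re.match(_VALID_URL, 'http://shared.sx/0060718775')   # the _TEST URL above
print(m.group('id'))   # '0060718775'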
ekwoodrich/nirha
nirhaweb/venv/lib/python2.7/site-packages/pip/req.py
328
83557
from email.parser import FeedParser import os import imp import locale import re import sys import shutil import tempfile import textwrap import zipfile from distutils.util import change_root from pip.locations import (bin_py, running_under_virtualenv,PIP_DELETE_MARKER_FILENAME, write_delete_marker_file, bin_user) from pip.exceptions import (InstallationError, UninstallationError, UnsupportedWheel, BestVersionAlreadyInstalled, InvalidWheelFilename, DistributionNotFound, PreviousBuildDirError) from pip.vcs import vcs from pip.log import logger from pip.util import (display_path, rmtree, ask, ask_path_exists, backup_dir, is_installable_dir, is_local, dist_is_local, dist_in_usersite, dist_in_site_packages, renames, normalize_path, egg_link_path, make_path_relative, call_subprocess, is_prerelease, normalize_name) from pip.backwardcompat import (urlparse, urllib, uses_pycache, ConfigParser, string_types, HTTPError, get_python_version, b) from pip.index import Link from pip.locations import build_prefix from pip.download import (PipSession, get_file_content, is_url, url_to_path, path_to_url, is_archive_file, unpack_vcs_link, is_vcs_url, is_file_url, unpack_file_url, unpack_http_url) import pip.wheel from pip.wheel import move_wheel_files, Wheel, wheel_ext from pip._vendor import pkg_resources, six def read_text_file(filename): """Return the contents of *filename*. Try to decode the file contents with utf-8, the preffered system encoding (e.g., cp1252 on some Windows machines) and latin1, in that order. Decoding a byte string with latin1 will never raise an error. In the worst case, the returned string will contain some garbage characters. """ with open(filename, 'rb') as fp: data = fp.read() encodings = ['utf-8', locale.getpreferredencoding(False), 'latin1'] for enc in encodings: try: data = data.decode(enc) except UnicodeDecodeError: continue break assert type(data) != bytes # Latin1 should have worked. 
return data class InstallRequirement(object): def __init__(self, req, comes_from, source_dir=None, editable=False, url=None, as_egg=False, update=True, prereleases=None, editable_options=None, from_bundle=False, pycompile=True): self.extras = () if isinstance(req, string_types): req = pkg_resources.Requirement.parse(req) self.extras = req.extras self.req = req self.comes_from = comes_from self.source_dir = source_dir self.editable = editable if editable_options is None: editable_options = {} self.editable_options = editable_options self.url = url self.as_egg = as_egg self._egg_info_path = None # This holds the pkg_resources.Distribution object if this requirement # is already available: self.satisfied_by = None # This hold the pkg_resources.Distribution object if this requirement # conflicts with another installed distribution: self.conflicts_with = None self._temp_build_dir = None self._is_bundle = None # True if the editable should be updated: self.update = update # Set to True after successful installation self.install_succeeded = None # UninstallPathSet of uninstalled distribution (for possible rollback) self.uninstalled = None self.use_user_site = False self.target_dir = None self.from_bundle = from_bundle self.pycompile = pycompile # True if pre-releases are acceptable if prereleases: self.prereleases = True elif self.req is not None: self.prereleases = any([is_prerelease(x[1]) and x[0] != "!=" for x in self.req.specs]) else: self.prereleases = False @classmethod def from_editable(cls, editable_req, comes_from=None, default_vcs=None): name, url, extras_override = parse_editable(editable_req, default_vcs) if url.startswith('file:'): source_dir = url_to_path(url) else: source_dir = None res = cls(name, comes_from, source_dir=source_dir, editable=True, url=url, editable_options=extras_override, prereleases=True) if extras_override is not None: res.extras = extras_override return res @classmethod def from_line(cls, name, comes_from=None, prereleases=None): """Creates an InstallRequirement from a name, which might be a requirement, directory containing 'setup.py', filename, or URL. """ url = None name = name.strip() req = None path = os.path.normpath(os.path.abspath(name)) link = None if is_url(name): link = Link(name) elif os.path.isdir(path) and (os.path.sep in name or name.startswith('.')): if not is_installable_dir(path): raise InstallationError("Directory %r is not installable. File 'setup.py' not found." % name) link = Link(path_to_url(name)) elif is_archive_file(path): if not os.path.isfile(path): logger.warn('Requirement %r looks like a filename, but the file does not exist', name) link = Link(path_to_url(name)) # If the line has an egg= definition, but isn't editable, pull the requirement out. # Otherwise, assume the name is the req for the non URL/path/archive case. if link and req is None: url = link.url_without_fragment req = link.egg_fragment #when fragment is None, this will become an 'unnamed' requirement # Handle relative file URLs if link.scheme == 'file' and re.search(r'\.\./', url): url = path_to_url(os.path.normpath(os.path.abspath(link.path))) # fail early for invalid or unsupported wheels if link.ext == wheel_ext: wheel = Wheel(link.filename) # can raise InvalidWheelFilename if not wheel.supported(): raise UnsupportedWheel("%s is not a supported wheel on this platform." 
% wheel.filename) else: req = name return cls(req, comes_from, url=url, prereleases=prereleases) def __str__(self): if self.req: s = str(self.req) if self.url: s += ' from %s' % self.url else: s = self.url if self.satisfied_by is not None: s += ' in %s' % display_path(self.satisfied_by.location) if self.comes_from: if isinstance(self.comes_from, string_types): comes_from = self.comes_from else: comes_from = self.comes_from.from_path() if comes_from: s += ' (from %s)' % comes_from return s def from_path(self): if self.req is None: return None s = str(self.req) if self.comes_from: if isinstance(self.comes_from, string_types): comes_from = self.comes_from else: comes_from = self.comes_from.from_path() if comes_from: s += '->' + comes_from return s def build_location(self, build_dir, unpack=True): if self._temp_build_dir is not None: return self._temp_build_dir if self.req is None: self._temp_build_dir = tempfile.mkdtemp('-build', 'pip-') self._ideal_build_dir = build_dir return self._temp_build_dir if self.editable: name = self.name.lower() else: name = self.name # FIXME: Is there a better place to create the build_dir? (hg and bzr need this) if not os.path.exists(build_dir): _make_build_dir(build_dir) return os.path.join(build_dir, name) def correct_build_location(self): """If the build location was a temporary directory, this will move it to a new more permanent location""" if self.source_dir is not None: return assert self.req is not None assert self._temp_build_dir old_location = self._temp_build_dir new_build_dir = self._ideal_build_dir del self._ideal_build_dir if self.editable: name = self.name.lower() else: name = self.name new_location = os.path.join(new_build_dir, name) if not os.path.exists(new_build_dir): logger.debug('Creating directory %s' % new_build_dir) _make_build_dir(new_build_dir) if os.path.exists(new_location): raise InstallationError( 'A package already exists in %s; please remove it to continue' % display_path(new_location)) logger.debug('Moving package %s from %s to new location %s' % (self, display_path(old_location), display_path(new_location))) shutil.move(old_location, new_location) self._temp_build_dir = new_location self.source_dir = new_location self._egg_info_path = None @property def name(self): if self.req is None: return None return self.req.project_name @property def url_name(self): if self.req is None: return None return urllib.quote(self.req.unsafe_name) @property def setup_py(self): try: import setuptools except ImportError: # Setuptools is not available raise InstallationError( "setuptools must be installed to install from a source " "distribution" ) setup_file = 'setup.py' if self.editable_options and 'subdirectory' in self.editable_options: setup_py = os.path.join(self.source_dir, self.editable_options['subdirectory'], setup_file) else: setup_py = os.path.join(self.source_dir, setup_file) # Python2 __file__ should not be unicode if six.PY2 and isinstance(setup_py, six.text_type): setup_py = setup_py.encode(sys.getfilesystemencoding()) return setup_py def run_egg_info(self, force_root_egg_info=False): assert self.source_dir if self.name: logger.notify('Running setup.py (path:%s) egg_info for package %s' % (self.setup_py, self.name)) else: logger.notify('Running setup.py (path:%s) egg_info for package from %s' % (self.setup_py, self.url)) logger.indent += 2 try: # if it's distribute>=0.7, it won't contain an importable # setuptools, and having an egg-info dir blocks the ability of # setup.py to find setuptools plugins, so delete the egg-info dir if # 
no setuptools. it will get recreated by the run of egg_info # NOTE: this self.name check only works when installing from a specifier # (not archive path/urls) # TODO: take this out later if self.name == 'distribute' and not os.path.isdir(os.path.join(self.source_dir, 'setuptools')): rmtree(os.path.join(self.source_dir, 'distribute.egg-info')) script = self._run_setup_py script = script.replace('__SETUP_PY__', repr(self.setup_py)) script = script.replace('__PKG_NAME__', repr(self.name)) egg_info_cmd = [sys.executable, '-c', script, 'egg_info'] # We can't put the .egg-info files at the root, because then the source code will be mistaken # for an installed egg, causing problems if self.editable or force_root_egg_info: egg_base_option = [] else: egg_info_dir = os.path.join(self.source_dir, 'pip-egg-info') if not os.path.exists(egg_info_dir): os.makedirs(egg_info_dir) egg_base_option = ['--egg-base', 'pip-egg-info'] call_subprocess( egg_info_cmd + egg_base_option, cwd=self.source_dir, filter_stdout=self._filter_install, show_stdout=False, command_level=logger.VERBOSE_DEBUG, command_desc='python setup.py egg_info') finally: logger.indent -= 2 if not self.req: self.req = pkg_resources.Requirement.parse( "%(Name)s==%(Version)s" % self.pkg_info()) self.correct_build_location() ## FIXME: This is a lame hack, entirely for PasteScript which has ## a self-provided entry point that causes this awkwardness _run_setup_py = """ __file__ = __SETUP_PY__ from setuptools.command import egg_info import pkg_resources import os import tokenize def replacement_run(self): self.mkpath(self.egg_info) installer = self.distribution.fetch_build_egg for ep in pkg_resources.iter_entry_points('egg_info.writers'): # require=False is the change we're making: writer = ep.load(require=False) if writer: writer(self, ep.name, os.path.join(self.egg_info,ep.name)) self.find_sources() egg_info.egg_info.run = replacement_run exec(compile(getattr(tokenize, 'open', open)(__file__).read().replace('\\r\\n', '\\n'), __file__, 'exec')) """ def egg_info_data(self, filename): if self.satisfied_by is not None: if not self.satisfied_by.has_metadata(filename): return None return self.satisfied_by.get_metadata(filename) assert self.source_dir filename = self.egg_info_path(filename) if not os.path.exists(filename): return None data = read_text_file(filename) return data def egg_info_path(self, filename): if self._egg_info_path is None: if self.editable: base = self.source_dir else: base = os.path.join(self.source_dir, 'pip-egg-info') filenames = os.listdir(base) if self.editable: filenames = [] for root, dirs, files in os.walk(base): for dir in vcs.dirnames: if dir in dirs: dirs.remove(dir) # Iterate over a copy of ``dirs``, since mutating # a list while iterating over it can cause trouble. # (See https://github.com/pypa/pip/pull/462.) for dir in list(dirs): # Don't search in anything that looks like a virtualenv environment if (os.path.exists(os.path.join(root, dir, 'bin', 'python')) or os.path.exists(os.path.join(root, dir, 'Scripts', 'Python.exe'))): dirs.remove(dir) # Also don't search through tests if dir == 'test' or dir == 'tests': dirs.remove(dir) filenames.extend([os.path.join(root, dir) for dir in dirs]) filenames = [f for f in filenames if f.endswith('.egg-info')] if not filenames: raise InstallationError('No files/directories in %s (from %s)' % (base, filename)) assert filenames, "No files/directories in %s (from %s)" % (base, filename) # if we have more than one match, we pick the toplevel one. 
This can # easily be the case if there is a dist folder which contains an # extracted tarball for testing purposes. if len(filenames) > 1: filenames.sort(key=lambda x: x.count(os.path.sep) + (os.path.altsep and x.count(os.path.altsep) or 0)) self._egg_info_path = os.path.join(base, filenames[0]) return os.path.join(self._egg_info_path, filename) def egg_info_lines(self, filename): data = self.egg_info_data(filename) if not data: return [] result = [] for line in data.splitlines(): line = line.strip() if not line or line.startswith('#'): continue result.append(line) return result def pkg_info(self): p = FeedParser() data = self.egg_info_data('PKG-INFO') if not data: logger.warn('No PKG-INFO file found in %s' % display_path(self.egg_info_path('PKG-INFO'))) p.feed(data or '') return p.close() @property def dependency_links(self): return self.egg_info_lines('dependency_links.txt') _requirements_section_re = re.compile(r'\[(.*?)\]') def requirements(self, extras=()): in_extra = None for line in self.egg_info_lines('requires.txt'): match = self._requirements_section_re.match(line.lower()) if match: in_extra = match.group(1) continue if in_extra and in_extra not in extras: logger.debug('skipping extra %s' % in_extra) # Skip requirement for an extra we aren't requiring continue yield line @property def absolute_versions(self): for qualifier, version in self.req.specs: if qualifier == '==': yield version @property def installed_version(self): return self.pkg_info()['version'] def assert_source_matches_version(self): assert self.source_dir version = self.installed_version if version not in self.req: logger.warn('Requested %s, but installing version %s' % (self, self.installed_version)) else: logger.debug('Source in %s has version %s, which satisfies requirement %s' % (display_path(self.source_dir), version, self)) def update_editable(self, obtain=True): if not self.url: logger.info("Cannot update repository at %s; repository location is unknown" % self.source_dir) return assert self.editable assert self.source_dir if self.url.startswith('file:'): # Static paths don't get updated return assert '+' in self.url, "bad url: %r" % self.url if not self.update: return vc_type, url = self.url.split('+', 1) backend = vcs.get_backend(vc_type) if backend: vcs_backend = backend(self.url) if obtain: vcs_backend.obtain(self.source_dir) else: vcs_backend.export(self.source_dir) else: assert 0, ( 'Unexpected version control type (in %s): %s' % (self.url, vc_type)) def uninstall(self, auto_confirm=False): """ Uninstall the distribution currently satisfying this requirement. Prompts before removing or modifying files unless ``auto_confirm`` is True. Refuses to delete or modify files outside of ``sys.prefix`` - thus uninstallation within a virtual environment can only modify that virtual environment, even if the virtualenv is linked to global site-packages. 
""" if not self.check_if_exists(): raise UninstallationError("Cannot uninstall requirement %s, not installed" % (self.name,)) dist = self.satisfied_by or self.conflicts_with paths_to_remove = UninstallPathSet(dist) pip_egg_info_path = os.path.join(dist.location, dist.egg_name()) + '.egg-info' dist_info_path = os.path.join(dist.location, '-'.join(dist.egg_name().split('-')[:2]) ) + '.dist-info' # workaround for http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=618367 debian_egg_info_path = pip_egg_info_path.replace( '-py%s' % pkg_resources.PY_MAJOR, '') easy_install_egg = dist.egg_name() + '.egg' develop_egg_link = egg_link_path(dist) pip_egg_info_exists = os.path.exists(pip_egg_info_path) debian_egg_info_exists = os.path.exists(debian_egg_info_path) dist_info_exists = os.path.exists(dist_info_path) if pip_egg_info_exists or debian_egg_info_exists: # package installed by pip if pip_egg_info_exists: egg_info_path = pip_egg_info_path else: egg_info_path = debian_egg_info_path paths_to_remove.add(egg_info_path) if dist.has_metadata('installed-files.txt'): for installed_file in dist.get_metadata('installed-files.txt').splitlines(): path = os.path.normpath(os.path.join(egg_info_path, installed_file)) paths_to_remove.add(path) #FIXME: need a test for this elif block #occurs with --single-version-externally-managed/--record outside of pip elif dist.has_metadata('top_level.txt'): if dist.has_metadata('namespace_packages.txt'): namespaces = dist.get_metadata('namespace_packages.txt') else: namespaces = [] for top_level_pkg in [p for p in dist.get_metadata('top_level.txt').splitlines() if p and p not in namespaces]: path = os.path.join(dist.location, top_level_pkg) paths_to_remove.add(path) paths_to_remove.add(path + '.py') paths_to_remove.add(path + '.pyc') elif dist.location.endswith(easy_install_egg): # package installed by easy_install paths_to_remove.add(dist.location) easy_install_pth = os.path.join(os.path.dirname(dist.location), 'easy-install.pth') paths_to_remove.add_pth(easy_install_pth, './' + easy_install_egg) elif develop_egg_link: # develop egg fh = open(develop_egg_link, 'r') link_pointer = os.path.normcase(fh.readline().strip()) fh.close() assert (link_pointer == dist.location), 'Egg-link %s does not match installed location of %s (at %s)' % (link_pointer, self.name, dist.location) paths_to_remove.add(develop_egg_link) easy_install_pth = os.path.join(os.path.dirname(develop_egg_link), 'easy-install.pth') paths_to_remove.add_pth(easy_install_pth, dist.location) elif dist_info_exists: for path in pip.wheel.uninstallation_paths(dist): paths_to_remove.add(path) # find distutils scripts= scripts if dist.has_metadata('scripts') and dist.metadata_isdir('scripts'): for script in dist.metadata_listdir('scripts'): if dist_in_usersite(dist): bin_dir = bin_user else: bin_dir = bin_py paths_to_remove.add(os.path.join(bin_dir, script)) if sys.platform == 'win32': paths_to_remove.add(os.path.join(bin_dir, script) + '.bat') # find console_scripts if dist.has_metadata('entry_points.txt'): config = ConfigParser.SafeConfigParser() config.readfp(FakeFile(dist.get_metadata_lines('entry_points.txt'))) if config.has_section('console_scripts'): for name, value in config.items('console_scripts'): if dist_in_usersite(dist): bin_dir = bin_user else: bin_dir = bin_py paths_to_remove.add(os.path.join(bin_dir, name)) if sys.platform == 'win32': paths_to_remove.add(os.path.join(bin_dir, name) + '.exe') paths_to_remove.add(os.path.join(bin_dir, name) + '.exe.manifest') paths_to_remove.add(os.path.join(bin_dir, name) 
+ '-script.py') paths_to_remove.remove(auto_confirm) self.uninstalled = paths_to_remove def rollback_uninstall(self): if self.uninstalled: self.uninstalled.rollback() else: logger.error("Can't rollback %s, nothing uninstalled." % (self.project_name,)) def commit_uninstall(self): if self.uninstalled: self.uninstalled.commit() else: logger.error("Can't commit %s, nothing uninstalled." % (self.project_name,)) def archive(self, build_dir): assert self.source_dir create_archive = True archive_name = '%s-%s.zip' % (self.name, self.installed_version) archive_path = os.path.join(build_dir, archive_name) if os.path.exists(archive_path): response = ask_path_exists( 'The file %s exists. (i)gnore, (w)ipe, (b)ackup ' % display_path(archive_path), ('i', 'w', 'b')) if response == 'i': create_archive = False elif response == 'w': logger.warn('Deleting %s' % display_path(archive_path)) os.remove(archive_path) elif response == 'b': dest_file = backup_dir(archive_path) logger.warn('Backing up %s to %s' % (display_path(archive_path), display_path(dest_file))) shutil.move(archive_path, dest_file) if create_archive: zip = zipfile.ZipFile(archive_path, 'w', zipfile.ZIP_DEFLATED) dir = os.path.normcase(os.path.abspath(self.source_dir)) for dirpath, dirnames, filenames in os.walk(dir): if 'pip-egg-info' in dirnames: dirnames.remove('pip-egg-info') for dirname in dirnames: dirname = os.path.join(dirpath, dirname) name = self._clean_zip_name(dirname, dir) zipdir = zipfile.ZipInfo(self.name + '/' + name + '/') zipdir.external_attr = 0x1ED << 16 # 0o755 zip.writestr(zipdir, '') for filename in filenames: if filename == PIP_DELETE_MARKER_FILENAME: continue filename = os.path.join(dirpath, filename) name = self._clean_zip_name(filename, dir) zip.write(filename, self.name + '/' + name) zip.close() logger.indent -= 2 logger.notify('Saved %s' % display_path(archive_path)) def _clean_zip_name(self, name, prefix): assert name.startswith(prefix+os.path.sep), ( "name %r doesn't start with prefix %r" % (name, prefix)) name = name[len(prefix)+1:] name = name.replace(os.path.sep, '/') return name def install(self, install_options, global_options=(), root=None): if self.editable: self.install_editable(install_options, global_options) return if self.is_wheel: version = pip.wheel.wheel_version(self.source_dir) pip.wheel.check_compatibility(version, self.name) self.move_wheel_files(self.source_dir, root=root) self.install_succeeded = True return temp_location = tempfile.mkdtemp('-record', 'pip-') record_filename = os.path.join(temp_location, 'install-record.txt') try: install_args = [sys.executable] install_args.append('-c') install_args.append( "import setuptools, tokenize;__file__=%r;"\ "exec(compile(getattr(tokenize, 'open', open)(__file__).read().replace('\\r\\n', '\\n'), __file__, 'exec'))" % self.setup_py) install_args += list(global_options) + ['install','--record', record_filename] if not self.as_egg: install_args += ['--single-version-externally-managed'] if root is not None: install_args += ['--root', root] if self.pycompile: install_args += ["--compile"] else: install_args += ["--no-compile"] if running_under_virtualenv(): ## FIXME: I'm not sure if this is a reasonable location; probably not ## but we can't put it in the default location, as that is a virtualenv symlink that isn't writable install_args += ['--install-headers', os.path.join(sys.prefix, 'include', 'site', 'python' + get_python_version())] logger.notify('Running setup.py install for %s' % self.name) logger.indent += 2 try: call_subprocess(install_args + 
install_options, cwd=self.source_dir, filter_stdout=self._filter_install, show_stdout=False) finally: logger.indent -= 2 if not os.path.exists(record_filename): logger.notify('Record file %s not found' % record_filename) return self.install_succeeded = True if self.as_egg: # there's no --always-unzip option we can pass to install command # so we unable to save the installed-files.txt return def prepend_root(path): if root is None or not os.path.isabs(path): return path else: return change_root(root, path) f = open(record_filename) for line in f: line = line.strip() if line.endswith('.egg-info'): egg_info_dir = prepend_root(line) break else: logger.warn('Could not find .egg-info directory in install record for %s' % self) ## FIXME: put the record somewhere ## FIXME: should this be an error? return f.close() new_lines = [] f = open(record_filename) for line in f: filename = line.strip() if os.path.isdir(filename): filename += os.path.sep new_lines.append(make_path_relative(prepend_root(filename), egg_info_dir)) f.close() f = open(os.path.join(egg_info_dir, 'installed-files.txt'), 'w') f.write('\n'.join(new_lines)+'\n') f.close() finally: if os.path.exists(record_filename): os.remove(record_filename) os.rmdir(temp_location) def remove_temporary_source(self): """Remove the source files from this requirement, if they are marked for deletion""" if self.is_bundle or os.path.exists(self.delete_marker_filename): logger.info('Removing source in %s' % self.source_dir) if self.source_dir: rmtree(self.source_dir) self.source_dir = None if self._temp_build_dir and os.path.exists(self._temp_build_dir): rmtree(self._temp_build_dir) self._temp_build_dir = None def install_editable(self, install_options, global_options=()): logger.notify('Running setup.py develop for %s' % self.name) logger.indent += 2 try: ## FIXME: should we do --install-headers here too? call_subprocess( [sys.executable, '-c', "import setuptools, tokenize; __file__=%r; exec(compile(getattr(tokenize, 'open', open)(__file__).read().replace('\\r\\n', '\\n'), __file__, 'exec'))" % self.setup_py] + list(global_options) + ['develop', '--no-deps'] + list(install_options), cwd=self.source_dir, filter_stdout=self._filter_install, show_stdout=False) finally: logger.indent -= 2 self.install_succeeded = True def _filter_install(self, line): level = logger.NOTIFY for regex in [r'^running .*', r'^writing .*', '^creating .*', '^[Cc]opying .*', r'^reading .*', r"^removing .*\.egg-info' \(and everything under it\)$", r'^byte-compiling ', # Not sure what this warning is, but it seems harmless: r"^warning: manifest_maker: standard file '-c' not found$"]: if re.search(regex, line.strip()): level = logger.INFO break return (level, line) def check_if_exists(self): """Find an installed distribution that satisfies or conflicts with this requirement, and set self.satisfied_by or self.conflicts_with appropriately.""" if self.req is None: return False try: # DISTRIBUTE TO SETUPTOOLS UPGRADE HACK (1 of 3 parts) # if we've already set distribute as a conflict to setuptools # then this check has already run before. 
we don't want it to # run again, and return False, since it would block the uninstall # TODO: remove this later if (self.req.project_name == 'setuptools' and self.conflicts_with and self.conflicts_with.project_name == 'distribute'): return True else: self.satisfied_by = pkg_resources.get_distribution(self.req) except pkg_resources.DistributionNotFound: return False except pkg_resources.VersionConflict: existing_dist = pkg_resources.get_distribution(self.req.project_name) if self.use_user_site: if dist_in_usersite(existing_dist): self.conflicts_with = existing_dist elif running_under_virtualenv() and dist_in_site_packages(existing_dist): raise InstallationError("Will not install to the user site because it will lack sys.path precedence to %s in %s" %(existing_dist.project_name, existing_dist.location)) else: self.conflicts_with = existing_dist return True @property def is_wheel(self): return self.url and '.whl' in self.url @property def is_bundle(self): if self._is_bundle is not None: return self._is_bundle base = self._temp_build_dir if not base: ## FIXME: this doesn't seem right: return False self._is_bundle = (os.path.exists(os.path.join(base, 'pip-manifest.txt')) or os.path.exists(os.path.join(base, 'pyinstall-manifest.txt'))) return self._is_bundle def bundle_requirements(self): for dest_dir in self._bundle_editable_dirs: package = os.path.basename(dest_dir) ## FIXME: svnism: for vcs_backend in vcs.backends: url = rev = None vcs_bundle_file = os.path.join( dest_dir, vcs_backend.bundle_file) if os.path.exists(vcs_bundle_file): vc_type = vcs_backend.name fp = open(vcs_bundle_file) content = fp.read() fp.close() url, rev = vcs_backend().parse_vcs_bundle_file(content) break if url: url = '%s+%s@%s' % (vc_type, url, rev) else: url = None yield InstallRequirement( package, self, editable=True, url=url, update=False, source_dir=dest_dir, from_bundle=True) for dest_dir in self._bundle_build_dirs: package = os.path.basename(dest_dir) yield InstallRequirement(package, self,source_dir=dest_dir, from_bundle=True) def move_bundle_files(self, dest_build_dir, dest_src_dir): base = self._temp_build_dir assert base src_dir = os.path.join(base, 'src') build_dir = os.path.join(base, 'build') bundle_build_dirs = [] bundle_editable_dirs = [] for source_dir, dest_dir, dir_collection in [ (src_dir, dest_src_dir, bundle_editable_dirs), (build_dir, dest_build_dir, bundle_build_dirs)]: if os.path.exists(source_dir): for dirname in os.listdir(source_dir): dest = os.path.join(dest_dir, dirname) dir_collection.append(dest) if os.path.exists(dest): logger.warn('The directory %s (containing package %s) already exists; cannot move source from bundle %s' % (dest, dirname, self)) continue if not os.path.exists(dest_dir): logger.info('Creating directory %s' % dest_dir) os.makedirs(dest_dir) shutil.move(os.path.join(source_dir, dirname), dest) if not os.listdir(source_dir): os.rmdir(source_dir) self._temp_build_dir = None self._bundle_build_dirs = bundle_build_dirs self._bundle_editable_dirs = bundle_editable_dirs def move_wheel_files(self, wheeldir, root=None): move_wheel_files( self.name, self.req, wheeldir, user=self.use_user_site, home=self.target_dir, root=root, pycompile=self.pycompile, ) @property def delete_marker_filename(self): assert self.source_dir return os.path.join(self.source_dir, PIP_DELETE_MARKER_FILENAME) class Requirements(object): def __init__(self): self._keys = [] self._dict = {} def keys(self): return self._keys def values(self): return [self._dict[key] for key in self._keys] def 
__contains__(self, item): return item in self._keys def __setitem__(self, key, value): if key not in self._keys: self._keys.append(key) self._dict[key] = value def __getitem__(self, key): return self._dict[key] def __repr__(self): values = ['%s: %s' % (repr(k), repr(self[k])) for k in self.keys()] return 'Requirements({%s})' % ', '.join(values) class RequirementSet(object): def __init__(self, build_dir, src_dir, download_dir, download_cache=None, upgrade=False, ignore_installed=False, as_egg=False, target_dir=None, ignore_dependencies=False, force_reinstall=False, use_user_site=False, session=None, pycompile=True, wheel_download_dir=None): self.build_dir = build_dir self.src_dir = src_dir self.download_dir = download_dir if download_cache: download_cache = os.path.expanduser(download_cache) self.download_cache = download_cache self.upgrade = upgrade self.ignore_installed = ignore_installed self.force_reinstall = force_reinstall self.requirements = Requirements() # Mapping of alias: real_name self.requirement_aliases = {} self.unnamed_requirements = [] self.ignore_dependencies = ignore_dependencies self.successfully_downloaded = [] self.successfully_installed = [] self.reqs_to_cleanup = [] self.as_egg = as_egg self.use_user_site = use_user_site self.target_dir = target_dir #set from --target option self.session = session or PipSession() self.pycompile = pycompile self.wheel_download_dir = wheel_download_dir def __str__(self): reqs = [req for req in self.requirements.values() if not req.comes_from] reqs.sort(key=lambda req: req.name.lower()) return ' '.join([str(req.req) for req in reqs]) def add_requirement(self, install_req): name = install_req.name install_req.as_egg = self.as_egg install_req.use_user_site = self.use_user_site install_req.target_dir = self.target_dir install_req.pycompile = self.pycompile if not name: #url or path requirement w/o an egg fragment self.unnamed_requirements.append(install_req) else: if self.has_requirement(name): raise InstallationError( 'Double requirement given: %s (already in %s, name=%r)' % (install_req, self.get_requirement(name), name)) self.requirements[name] = install_req ## FIXME: what about other normalizations? E.g., _ vs. -? 
if name.lower() != name: self.requirement_aliases[name.lower()] = name def has_requirement(self, project_name): for name in project_name, project_name.lower(): if name in self.requirements or name in self.requirement_aliases: return True return False @property def has_requirements(self): return list(self.requirements.values()) or self.unnamed_requirements @property def has_editables(self): if any(req.editable for req in self.requirements.values()): return True if any(req.editable for req in self.unnamed_requirements): return True return False @property def is_download(self): if self.download_dir: self.download_dir = os.path.expanduser(self.download_dir) if os.path.exists(self.download_dir): return True else: logger.fatal('Could not find download directory') raise InstallationError( "Could not find or access download directory '%s'" % display_path(self.download_dir)) return False def get_requirement(self, project_name): for name in project_name, project_name.lower(): if name in self.requirements: return self.requirements[name] if name in self.requirement_aliases: return self.requirements[self.requirement_aliases[name]] raise KeyError("No project with the name %r" % project_name) def uninstall(self, auto_confirm=False): for req in self.requirements.values(): req.uninstall(auto_confirm=auto_confirm) req.commit_uninstall() def locate_files(self): ## FIXME: duplicates code from prepare_files; relevant code should ## probably be factored out into a separate method unnamed = list(self.unnamed_requirements) reqs = list(self.requirements.values()) while reqs or unnamed: if unnamed: req_to_install = unnamed.pop(0) else: req_to_install = reqs.pop(0) install_needed = True if not self.ignore_installed and not req_to_install.editable: req_to_install.check_if_exists() if req_to_install.satisfied_by: if self.upgrade: #don't uninstall conflict if user install and and conflict is not user install if not (self.use_user_site and not dist_in_usersite(req_to_install.satisfied_by)): req_to_install.conflicts_with = req_to_install.satisfied_by req_to_install.satisfied_by = None else: install_needed = False if req_to_install.satisfied_by: logger.notify('Requirement already satisfied ' '(use --upgrade to upgrade): %s' % req_to_install) if req_to_install.editable: if req_to_install.source_dir is None: req_to_install.source_dir = req_to_install.build_location(self.src_dir) elif install_needed: req_to_install.source_dir = req_to_install.build_location(self.build_dir, not self.is_download) if req_to_install.source_dir is not None and not os.path.isdir(req_to_install.source_dir): raise InstallationError('Could not install requirement %s ' 'because source folder %s does not exist ' '(perhaps --no-download was used without first running ' 'an equivalent install with --no-install?)' % (req_to_install, req_to_install.source_dir)) def prepare_files(self, finder, force_root_egg_info=False, bundle=False): """Prepare process. 
Create temp directories, download and/or unpack files.""" unnamed = list(self.unnamed_requirements) reqs = list(self.requirements.values()) while reqs or unnamed: if unnamed: req_to_install = unnamed.pop(0) else: req_to_install = reqs.pop(0) install = True best_installed = False not_found = None if not self.ignore_installed and not req_to_install.editable: req_to_install.check_if_exists() if req_to_install.satisfied_by: if self.upgrade: if not self.force_reinstall and not req_to_install.url: try: url = finder.find_requirement( req_to_install, self.upgrade) except BestVersionAlreadyInstalled: best_installed = True install = False except DistributionNotFound: not_found = sys.exc_info()[1] else: # Avoid the need to call find_requirement again req_to_install.url = url.url if not best_installed: #don't uninstall conflict if user install and conflict is not user install if not (self.use_user_site and not dist_in_usersite(req_to_install.satisfied_by)): req_to_install.conflicts_with = req_to_install.satisfied_by req_to_install.satisfied_by = None else: install = False if req_to_install.satisfied_by: if best_installed: logger.notify('Requirement already up-to-date: %s' % req_to_install) else: logger.notify('Requirement already satisfied ' '(use --upgrade to upgrade): %s' % req_to_install) if req_to_install.editable: logger.notify('Obtaining %s' % req_to_install) elif install: if req_to_install.url and req_to_install.url.lower().startswith('file:'): logger.notify('Unpacking %s' % display_path(url_to_path(req_to_install.url))) else: logger.notify('Downloading/unpacking %s' % req_to_install) logger.indent += 2 try: is_bundle = False is_wheel = False if req_to_install.editable: if req_to_install.source_dir is None: location = req_to_install.build_location(self.src_dir) req_to_install.source_dir = location else: location = req_to_install.source_dir if not os.path.exists(self.build_dir): _make_build_dir(self.build_dir) req_to_install.update_editable(not self.is_download) if self.is_download: req_to_install.run_egg_info() req_to_install.archive(self.download_dir) else: req_to_install.run_egg_info() elif install: ##@@ if filesystem packages are not marked ##editable in a req, a non deterministic error ##occurs when the script attempts to unpack the ##build directory # NB: This call can result in the creation of a temporary build directory location = req_to_install.build_location(self.build_dir, not self.is_download) unpack = True url = None # In the case where the req comes from a bundle, we should # assume a build dir exists and move on if req_to_install.from_bundle: pass # If a checkout exists, it's unwise to keep going. version # inconsistencies are logged later, but do not fail the # installation. elif os.path.exists(os.path.join(location, 'setup.py')): raise PreviousBuildDirError(textwrap.dedent(""" pip can't proceed with requirement '%s' due to a pre-existing build directory. location: %s This is likely due to a previous installation that failed. pip is being responsible and not assuming it can delete this. Please delete it and try again. """ % (req_to_install, location))) else: ## FIXME: this won't upgrade when there's an existing package unpacked in `location` if req_to_install.url is None: if not_found: raise not_found url = finder.find_requirement(req_to_install, upgrade=self.upgrade) else: ## FIXME: should req_to_install.url already be a link? 
url = Link(req_to_install.url) assert url if url: try: if ( url.filename.endswith(wheel_ext) and self.wheel_download_dir ): # when doing 'pip wheel` download_dir = self.wheel_download_dir do_download = True else: download_dir = self.download_dir do_download = self.is_download self.unpack_url( url, location, download_dir, do_download, ) except HTTPError as exc: logger.fatal( 'Could not install requirement %s because ' 'of error %s' % (req_to_install, exc) ) raise InstallationError( 'Could not install requirement %s because of HTTP error %s for URL %s' % (req_to_install, e, url)) else: unpack = False if unpack: is_bundle = req_to_install.is_bundle is_wheel = url and url.filename.endswith(wheel_ext) if is_bundle: req_to_install.move_bundle_files(self.build_dir, self.src_dir) for subreq in req_to_install.bundle_requirements(): reqs.append(subreq) self.add_requirement(subreq) elif self.is_download: req_to_install.source_dir = location if not is_wheel: # FIXME: see https://github.com/pypa/pip/issues/1112 req_to_install.run_egg_info() if url and url.scheme in vcs.all_schemes: req_to_install.archive(self.download_dir) elif is_wheel: req_to_install.source_dir = location req_to_install.url = url.url else: req_to_install.source_dir = location req_to_install.run_egg_info() if force_root_egg_info: # We need to run this to make sure that the .egg-info/ # directory is created for packing in the bundle req_to_install.run_egg_info(force_root_egg_info=True) req_to_install.assert_source_matches_version() #@@ sketchy way of identifying packages not grabbed from an index if bundle and req_to_install.url: self.copy_to_build_dir(req_to_install) install = False # req_to_install.req is only avail after unpack for URL pkgs # repeat check_if_exists to uninstall-on-upgrade (#14) if not self.ignore_installed: req_to_install.check_if_exists() if req_to_install.satisfied_by: if self.upgrade or self.ignore_installed: #don't uninstall conflict if user install and and conflict is not user install if not (self.use_user_site and not dist_in_usersite(req_to_install.satisfied_by)): req_to_install.conflicts_with = req_to_install.satisfied_by req_to_install.satisfied_by = None else: logger.notify( 'Requirement already satisfied (use ' '--upgrade to upgrade): %s' % req_to_install ) install = False if is_wheel: dist = list( pkg_resources.find_distributions(location) )[0] if not req_to_install.req: req_to_install.req = dist.as_requirement() self.add_requirement(req_to_install) if not self.ignore_dependencies: for subreq in dist.requires( req_to_install.extras): if self.has_requirement( subreq.project_name): continue subreq = InstallRequirement(str(subreq), req_to_install) reqs.append(subreq) self.add_requirement(subreq) # sdists elif not is_bundle: ## FIXME: shouldn't be globally added: finder.add_dependency_links(req_to_install.dependency_links) if (req_to_install.extras): logger.notify("Installing extra requirements: %r" % ','.join(req_to_install.extras)) if not self.ignore_dependencies: for req in req_to_install.requirements(req_to_install.extras): try: name = pkg_resources.Requirement.parse(req).project_name except ValueError: e = sys.exc_info()[1] ## FIXME: proper warning logger.error('Invalid requirement: %r (%s) in requirement %s' % (req, e, req_to_install)) continue if self.has_requirement(name): ## FIXME: check for conflict continue subreq = InstallRequirement(req, req_to_install) reqs.append(subreq) self.add_requirement(subreq) if not self.has_requirement(req_to_install.name): #'unnamed' requirements will get added here 
self.add_requirement(req_to_install) # cleanup tmp src if not is_bundle: if ( self.is_download or req_to_install._temp_build_dir is not None ): self.reqs_to_cleanup.append(req_to_install) if install: self.successfully_downloaded.append(req_to_install) if bundle and (req_to_install.url and req_to_install.url.startswith('file:///')): self.copy_to_build_dir(req_to_install) finally: logger.indent -= 2 def cleanup_files(self, bundle=False): """Clean up files, remove builds.""" logger.notify('Cleaning up...') logger.indent += 2 for req in self.reqs_to_cleanup: req.remove_temporary_source() remove_dir = [] if self._pip_has_created_build_dir(): remove_dir.append(self.build_dir) # The source dir of a bundle can always be removed. # FIXME: not if it pre-existed the bundle! if bundle: remove_dir.append(self.src_dir) for dir in remove_dir: if os.path.exists(dir): logger.info('Removing temporary dir %s...' % dir) rmtree(dir) logger.indent -= 2 def _pip_has_created_build_dir(self): return (self.build_dir == build_prefix and os.path.exists(os.path.join(self.build_dir, PIP_DELETE_MARKER_FILENAME))) def copy_to_build_dir(self, req_to_install): target_dir = req_to_install.editable and self.src_dir or self.build_dir logger.info("Copying %s to %s" % (req_to_install.name, target_dir)) dest = os.path.join(target_dir, req_to_install.name) shutil.copytree(req_to_install.source_dir, dest) call_subprocess(["python", "%s/setup.py" % dest, "clean"], cwd=dest, command_desc='python setup.py clean') def unpack_url(self, link, location, download_dir=None, only_download=False): if download_dir is None: download_dir = self.download_dir # non-editable vcs urls if is_vcs_url(link): if only_download: loc = download_dir else: loc = location unpack_vcs_link(link, loc, only_download) # file urls elif is_file_url(link): unpack_file_url(link, location, download_dir) if only_download: write_delete_marker_file(location) # http urls else: unpack_http_url( link, location, self.download_cache, download_dir, self.session, ) if only_download: write_delete_marker_file(location) def install(self, install_options, global_options=(), *args, **kwargs): """Install everything in this set (after having downloaded and unpacked the packages)""" to_install = [r for r in self.requirements.values() if not r.satisfied_by] # DISTRIBUTE TO SETUPTOOLS UPGRADE HACK (1 of 3 parts) # move the distribute-0.7.X wrapper to the end because it does not # install a setuptools package. by moving it to the end, we ensure it's # setuptools dependency is handled first, which will provide the # setuptools package # TODO: take this out later distribute_req = pkg_resources.Requirement.parse("distribute>=0.7") for req in to_install: if req.name == 'distribute' and req.installed_version in distribute_req: to_install.remove(req) to_install.append(req) if to_install: logger.notify('Installing collected packages: %s' % ', '.join([req.name for req in to_install])) logger.indent += 2 try: for requirement in to_install: # DISTRIBUTE TO SETUPTOOLS UPGRADE HACK (1 of 3 parts) # when upgrading from distribute-0.6.X to the new merged # setuptools in py2, we need to force setuptools to uninstall # distribute. In py3, which is always using distribute, this # conversion is already happening in distribute's pkg_resources. 
# It's ok *not* to check if setuptools>=0.7 because if someone # were actually trying to ugrade from distribute to setuptools # 0.6.X, then all this could do is actually help, although that # upgade path was certainly never "supported" # TODO: remove this later if requirement.name == 'setuptools': try: # only uninstall distribute<0.7. For >=0.7, setuptools # will also be present, and that's what we need to # uninstall distribute_requirement = pkg_resources.Requirement.parse("distribute<0.7") existing_distribute = pkg_resources.get_distribution("distribute") if existing_distribute in distribute_requirement: requirement.conflicts_with = existing_distribute except pkg_resources.DistributionNotFound: # distribute wasn't installed, so nothing to do pass if requirement.conflicts_with: logger.notify('Found existing installation: %s' % requirement.conflicts_with) logger.indent += 2 try: requirement.uninstall(auto_confirm=True) finally: logger.indent -= 2 try: requirement.install(install_options, global_options, *args, **kwargs) except: # if install did not succeed, rollback previous uninstall if requirement.conflicts_with and not requirement.install_succeeded: requirement.rollback_uninstall() raise else: if requirement.conflicts_with and requirement.install_succeeded: requirement.commit_uninstall() requirement.remove_temporary_source() finally: logger.indent -= 2 self.successfully_installed = to_install def create_bundle(self, bundle_filename): ## FIXME: can't decide which is better; zip is easier to read ## random files from, but tar.bz2 is smaller and not as lame a ## format. ## FIXME: this file should really include a manifest of the ## packages, maybe some other metadata files. It would make ## it easier to detect as well. zip = zipfile.ZipFile(bundle_filename, 'w', zipfile.ZIP_DEFLATED) vcs_dirs = [] for dir, basename in (self.build_dir, 'build'), (self.src_dir, 'src'): dir = os.path.normcase(os.path.abspath(dir)) for dirpath, dirnames, filenames in os.walk(dir): for backend in vcs.backends: vcs_backend = backend() vcs_url = vcs_rev = None if vcs_backend.dirname in dirnames: for vcs_dir in vcs_dirs: if dirpath.startswith(vcs_dir): # vcs bundle file already in parent directory break else: vcs_url, vcs_rev = vcs_backend.get_info( os.path.join(dir, dirpath)) vcs_dirs.append(dirpath) vcs_bundle_file = vcs_backend.bundle_file vcs_guide = vcs_backend.guide % {'url': vcs_url, 'rev': vcs_rev} dirnames.remove(vcs_backend.dirname) break if 'pip-egg-info' in dirnames: dirnames.remove('pip-egg-info') for dirname in dirnames: dirname = os.path.join(dirpath, dirname) name = self._clean_zip_name(dirname, dir) zip.writestr(basename + '/' + name + '/', '') for filename in filenames: if filename == PIP_DELETE_MARKER_FILENAME: continue filename = os.path.join(dirpath, filename) name = self._clean_zip_name(filename, dir) zip.write(filename, basename + '/' + name) if vcs_url: name = os.path.join(dirpath, vcs_bundle_file) name = self._clean_zip_name(name, dir) zip.writestr(basename + '/' + name, vcs_guide) zip.writestr('pip-manifest.txt', self.bundle_requirements()) zip.close() BUNDLE_HEADER = '''\ # This is a pip bundle file, that contains many source packages # that can be installed as a group. 
You can install this like: # pip this_file.zip # The rest of the file contains a list of all the packages included: ''' def bundle_requirements(self): parts = [self.BUNDLE_HEADER] for req in [req for req in self.requirements.values() if not req.comes_from]: parts.append('%s==%s\n' % (req.name, req.installed_version)) parts.append('# These packages were installed to satisfy the above requirements:\n') for req in [req for req in self.requirements.values() if req.comes_from]: parts.append('%s==%s\n' % (req.name, req.installed_version)) ## FIXME: should we do something with self.unnamed_requirements? return ''.join(parts) def _clean_zip_name(self, name, prefix): assert name.startswith(prefix+os.path.sep), ( "name %r doesn't start with prefix %r" % (name, prefix)) name = name[len(prefix)+1:] name = name.replace(os.path.sep, '/') return name def _make_build_dir(build_dir): os.makedirs(build_dir) write_delete_marker_file(build_dir) _scheme_re = re.compile(r'^(http|https|file):', re.I) def parse_requirements(filename, finder=None, comes_from=None, options=None, session=None): if session is None: session = PipSession() skip_match = None skip_regex = options.skip_requirements_regex if options else None if skip_regex: skip_match = re.compile(skip_regex) reqs_file_dir = os.path.dirname(os.path.abspath(filename)) filename, content = get_file_content(filename, comes_from=comes_from, session=session, ) for line_number, line in enumerate(content.splitlines()): line_number += 1 line = line.strip() # Remove comments from file line = re.sub(r"(^|\s)#.*$", "", line) if not line or line.startswith('#'): continue if skip_match and skip_match.search(line): continue if line.startswith('-r') or line.startswith('--requirement'): if line.startswith('-r'): req_url = line[2:].strip() else: req_url = line[len('--requirement'):].strip().strip('=') if _scheme_re.search(filename): # Relative to a URL req_url = urlparse.urljoin(filename, req_url) elif not _scheme_re.search(req_url): req_url = os.path.join(os.path.dirname(filename), req_url) for item in parse_requirements(req_url, finder, comes_from=filename, options=options, session=session): yield item elif line.startswith('-Z') or line.startswith('--always-unzip'): # No longer used, but previously these were used in # requirement files, so we'll ignore. 
pass elif line.startswith('-f') or line.startswith('--find-links'): if line.startswith('-f'): line = line[2:].strip() else: line = line[len('--find-links'):].strip().lstrip('=') ## FIXME: it would be nice to keep track of the source of ## the find_links: # support a find-links local path relative to a requirements file relative_to_reqs_file = os.path.join(reqs_file_dir, line) if os.path.exists(relative_to_reqs_file): line = relative_to_reqs_file if finder: finder.find_links.append(line) elif line.startswith('-i') or line.startswith('--index-url'): if line.startswith('-i'): line = line[2:].strip() else: line = line[len('--index-url'):].strip().lstrip('=') if finder: finder.index_urls = [line] elif line.startswith('--extra-index-url'): line = line[len('--extra-index-url'):].strip().lstrip('=') if finder: finder.index_urls.append(line) elif line.startswith('--use-wheel'): finder.use_wheel = True elif line.startswith('--no-index'): finder.index_urls = [] elif line.startswith("--allow-external"): line = line[len("--allow-external"):].strip().lstrip("=") finder.allow_external |= set([normalize_name(line).lower()]) elif line.startswith("--allow-all-external"): finder.allow_all_external = True # Remove in 1.7 elif line.startswith("--no-allow-external"): pass # Remove in 1.7 elif line.startswith("--no-allow-insecure"): pass # Remove after 1.7 elif line.startswith("--allow-insecure"): line = line[len("--allow-insecure"):].strip().lstrip("=") finder.allow_unverified |= set([normalize_name(line).lower()]) elif line.startswith("--allow-unverified"): line = line[len("--allow-unverified"):].strip().lstrip("=") finder.allow_unverified |= set([normalize_name(line).lower()]) else: comes_from = '-r %s (line %s)' % (filename, line_number) if line.startswith('-e') or line.startswith('--editable'): if line.startswith('-e'): line = line[2:].strip() else: line = line[len('--editable'):].strip().lstrip('=') req = InstallRequirement.from_editable( line, comes_from=comes_from, default_vcs=options.default_vcs if options else None) else: req = InstallRequirement.from_line(line, comes_from, prereleases=getattr(options, "pre", None)) yield req def _strip_postfix(req): """ Strip req postfix ( -dev, 0.2, etc ) """ ## FIXME: use package_to_requirement? match = re.search(r'^(.*?)(?:-dev|-\d.*)$', req) if match: # Strip off -dev, -0.2, etc. req = match.group(1) return req def _build_req_from_url(url): parts = [p for p in url.split('#', 1)[0].split('/') if p] req = None if parts[-2] in ('tags', 'branches', 'tag', 'branch'): req = parts[-3] elif parts[-1] == 'trunk': req = parts[-2] return req def _build_editable_options(req): """ This method generates a dictionary of the query string parameters contained in a given editable URL. """ regexp = re.compile(r"[\?#&](?P<name>[^&=]+)=(?P<value>[^&=]+)") matched = regexp.findall(req) if matched: ret = dict() for option in matched: (name, value) = option if name in ret: raise Exception("%s option already defined" % name) ret[name] = value return ret return None def parse_editable(editable_req, default_vcs=None): """Parses svn+http://blahblah@rev#egg=Foobar into a requirement (Foobar) and a URL""" url = editable_req extras = None # If a file path is specified with extras, strip off the extras. m = re.match(r'^(.+)(\[[^\]]+\])$', url) if m: url_no_extras = m.group(1) extras = m.group(2) else: url_no_extras = url if os.path.isdir(url_no_extras): if not os.path.exists(os.path.join(url_no_extras, 'setup.py')): raise InstallationError("Directory %r is not installable. 
File 'setup.py' not found." % url_no_extras) # Treating it as code that has already been checked out url_no_extras = path_to_url(url_no_extras) if url_no_extras.lower().startswith('file:'): if extras: return None, url_no_extras, pkg_resources.Requirement.parse('__placeholder__' + extras).extras else: return None, url_no_extras, None for version_control in vcs: if url.lower().startswith('%s:' % version_control): url = '%s+%s' % (version_control, url) break if '+' not in url: if default_vcs: url = default_vcs + '+' + url else: raise InstallationError( '%s should either be a path to a local project or a VCS url beginning with svn+, git+, hg+, or bzr+' % editable_req) vc_type = url.split('+', 1)[0].lower() if not vcs.get_backend(vc_type): error_message = 'For --editable=%s only ' % editable_req + \ ', '.join([backend.name + '+URL' for backend in vcs.backends]) + \ ' is currently supported' raise InstallationError(error_message) try: options = _build_editable_options(editable_req) except Exception: message = sys.exc_info()[1] raise InstallationError( '--editable=%s error in editable options:%s' % (editable_req, message)) if not options or 'egg' not in options: req = _build_req_from_url(editable_req) if not req: raise InstallationError('--editable=%s is not the right format; it must have #egg=Package' % editable_req) else: req = options['egg'] package = _strip_postfix(req) return package, url, options class UninstallPathSet(object): """A set of file paths to be removed in the uninstallation of a requirement.""" def __init__(self, dist): self.paths = set() self._refuse = set() self.pth = {} self.dist = dist self.save_dir = None self._moved_paths = [] def _permitted(self, path): """ Return True if the given path is one we are permitted to remove/modify, False otherwise. """ return is_local(path) def _can_uninstall(self): if not dist_is_local(self.dist): logger.notify("Not uninstalling %s at %s, outside environment %s" % (self.dist.project_name, normalize_path(self.dist.location), sys.prefix)) return False return True def add(self, path): path = normalize_path(path) if not os.path.exists(path): return if self._permitted(path): self.paths.add(path) else: self._refuse.add(path) # __pycache__ files can show up after 'installed-files.txt' is created, due to imports if os.path.splitext(path)[1] == '.py' and uses_pycache: self.add(imp.cache_from_source(path)) def add_pth(self, pth_file, entry): pth_file = normalize_path(pth_file) if self._permitted(pth_file): if pth_file not in self.pth: self.pth[pth_file] = UninstallPthEntries(pth_file) self.pth[pth_file].add(entry) else: self._refuse.add(pth_file) def compact(self, paths): """Compact a path set to contain the minimal number of paths necessary to contain all paths in the set. If /a/path/ and /a/path/to/a/file.txt are both in the set, leave only the shorter path.""" short_paths = set() for path in sorted(paths, key=len): if not any([(path.startswith(shortpath) and path[len(shortpath.rstrip(os.path.sep))] == os.path.sep) for shortpath in short_paths]): short_paths.add(path) return short_paths def _stash(self, path): return os.path.join( self.save_dir, os.path.splitdrive(path)[1].lstrip(os.path.sep)) def remove(self, auto_confirm=False): """Remove paths in ``self.paths`` with confirmation (unless ``auto_confirm`` is True).""" if not self._can_uninstall(): return if not self.paths: logger.notify("Can't uninstall '%s'. No files were found to uninstall." 
% self.dist.project_name) return logger.notify('Uninstalling %s:' % self.dist.project_name) logger.indent += 2 paths = sorted(self.compact(self.paths)) try: if auto_confirm: response = 'y' else: for path in paths: logger.notify(path) response = ask('Proceed (y/n)? ', ('y', 'n')) if self._refuse: logger.notify('Not removing or modifying (outside of prefix):') for path in self.compact(self._refuse): logger.notify(path) if response == 'y': self.save_dir = tempfile.mkdtemp(suffix='-uninstall', prefix='pip-') for path in paths: new_path = self._stash(path) logger.info('Removing file or directory %s' % path) self._moved_paths.append(path) renames(path, new_path) for pth in self.pth.values(): pth.remove() logger.notify('Successfully uninstalled %s' % self.dist.project_name) finally: logger.indent -= 2 def rollback(self): """Rollback the changes previously made by remove().""" if self.save_dir is None: logger.error("Can't roll back %s; was not uninstalled" % self.dist.project_name) return False logger.notify('Rolling back uninstall of %s' % self.dist.project_name) for path in self._moved_paths: tmp_path = self._stash(path) logger.info('Replacing %s' % path) renames(tmp_path, path) for pth in self.pth: pth.rollback() def commit(self): """Remove temporary save dir: rollback will no longer be possible.""" if self.save_dir is not None: rmtree(self.save_dir) self.save_dir = None self._moved_paths = [] class UninstallPthEntries(object): def __init__(self, pth_file): if not os.path.isfile(pth_file): raise UninstallationError("Cannot remove entries from nonexistent file %s" % pth_file) self.file = pth_file self.entries = set() self._saved_lines = None def add(self, entry): entry = os.path.normcase(entry) # On Windows, os.path.normcase converts the entry to use # backslashes. This is correct for entries that describe absolute # paths outside of site-packages, but all the others use forward # slashes. if sys.platform == 'win32' and not os.path.splitdrive(entry)[0]: entry = entry.replace('\\', '/') self.entries.add(entry) def remove(self): logger.info('Removing pth entries from %s:' % self.file) fh = open(self.file, 'rb') # windows uses '\r\n' with py3k, but uses '\n' with py2.x lines = fh.readlines() self._saved_lines = lines fh.close() if any(b('\r\n') in line for line in lines): endline = '\r\n' else: endline = '\n' for entry in self.entries: try: logger.info('Removing entry: %s' % entry) lines.remove(b(entry + endline)) except ValueError: pass fh = open(self.file, 'wb') fh.writelines(lines) fh.close() def rollback(self): if self._saved_lines is None: logger.error('Cannot roll back changes to %s, none were made' % self.file) return False logger.info('Rolling %s back to previous state' % self.file) fh = open(self.file, 'wb') fh.writelines(self._saved_lines) fh.close() return True class FakeFile(object): """Wrap a list of lines in an object with readline() to make ConfigParser happy.""" def __init__(self, lines): self._gen = (l for l in lines) def readline(self): try: try: return next(self._gen) except NameError: return self._gen.next() except StopIteration: return '' def __iter__(self): return self._gen
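
# ---------------------------------------------------------------------------
# Editor's illustration (not part of the original module): a minimal sketch of
# how parse_requirements() above is typically driven. It assumes this module
# is importable as pip.req and that PipSession lives in pip.download, as in
# pip 1.5-era layouts; 'requirements.txt' is a hypothetical path. Kept as
# comments so that importing this module is unaffected.
#
#   from pip.download import PipSession
#   from pip.req import parse_requirements
#
#   session = PipSession()
#   for req in parse_requirements('requirements.txt', session=session):
#       # each item is an InstallRequirement built from one requirement line
#       print req.name, req.editable
# ---------------------------------------------------------------------------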
apache-2.0
pgonda/servo
tests/wpt/css-tests/tools/pywebsocket/src/test/test_handshake_hybi.py
413
22552
#!/usr/bin/env python # # Copyright 2011, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Tests for handshake module.""" import unittest import set_sys_path # Update sys.path to locate mod_pywebsocket module. from mod_pywebsocket import common from mod_pywebsocket.handshake._base import AbortedByUserException from mod_pywebsocket.handshake._base import HandshakeException from mod_pywebsocket.handshake._base import VersionException from mod_pywebsocket.handshake.hybi import Handshaker import mock class RequestDefinition(object): """A class for holding data for constructing opening handshake strings for testing the opening handshake processor. """ def __init__(self, method, uri, headers): self.method = method self.uri = uri self.headers = headers def _create_good_request_def(): return RequestDefinition( 'GET', '/demo', {'Host': 'server.example.com', 'Upgrade': 'websocket', 'Connection': 'Upgrade', 'Sec-WebSocket-Key': 'dGhlIHNhbXBsZSBub25jZQ==', 'Sec-WebSocket-Version': '13', 'Origin': 'http://example.com'}) def _create_request(request_def): conn = mock.MockConn('') return mock.MockRequest( method=request_def.method, uri=request_def.uri, headers_in=request_def.headers, connection=conn) def _create_handshaker(request): handshaker = Handshaker(request, mock.MockDispatcher()) return handshaker class SubprotocolChoosingDispatcher(object): """A dispatcher for testing. This dispatcher sets the i-th subprotocol of requested ones to ws_protocol where i is given on construction as index argument. If index is negative, default_value will be set to ws_protocol. """ def __init__(self, index, default_value=None): self.index = index self.default_value = default_value def do_extra_handshake(self, conn_context): if self.index >= 0: conn_context.ws_protocol = conn_context.ws_requested_protocols[ self.index] else: conn_context.ws_protocol = self.default_value def transfer_data(self, conn_context): pass class HandshakeAbortedException(Exception): pass class AbortingDispatcher(object): """A dispatcher for testing. 
This dispatcher raises an exception in do_extra_handshake to reject the request. """ def do_extra_handshake(self, conn_context): raise HandshakeAbortedException('An exception to reject the request') def transfer_data(self, conn_context): pass class AbortedByUserDispatcher(object): """A dispatcher for testing. This dispatcher raises an AbortedByUserException in do_extra_handshake to reject the request. """ def do_extra_handshake(self, conn_context): raise AbortedByUserException('An AbortedByUserException to reject the ' 'request') def transfer_data(self, conn_context): pass _EXPECTED_RESPONSE = ( 'HTTP/1.1 101 Switching Protocols\r\n' 'Upgrade: websocket\r\n' 'Connection: Upgrade\r\n' 'Sec-WebSocket-Accept: s3pPLMBiTxaQ9kYGzzhZRbK+xOo=\r\n\r\n') class HandshakerTest(unittest.TestCase): """A unittest for draft-ietf-hybi-thewebsocketprotocol-06 and later handshake processor. """ def test_do_handshake(self): request = _create_request(_create_good_request_def()) dispatcher = mock.MockDispatcher() handshaker = Handshaker(request, dispatcher) handshaker.do_handshake() self.assertTrue(dispatcher.do_extra_handshake_called) self.assertEqual( _EXPECTED_RESPONSE, request.connection.written_data()) self.assertEqual('/demo', request.ws_resource) self.assertEqual('http://example.com', request.ws_origin) self.assertEqual(None, request.ws_protocol) self.assertEqual(None, request.ws_extensions) self.assertEqual(common.VERSION_HYBI_LATEST, request.ws_version) def test_do_handshake_with_extra_headers(self): request_def = _create_good_request_def() # Add headers not related to WebSocket opening handshake. request_def.headers['FooKey'] = 'BarValue' request_def.headers['EmptyKey'] = '' request = _create_request(request_def) handshaker = _create_handshaker(request) handshaker.do_handshake() self.assertEqual( _EXPECTED_RESPONSE, request.connection.written_data()) def test_do_handshake_with_capitalized_value(self): request_def = _create_good_request_def() request_def.headers['upgrade'] = 'WEBSOCKET' request = _create_request(request_def) handshaker = _create_handshaker(request) handshaker.do_handshake() self.assertEqual( _EXPECTED_RESPONSE, request.connection.written_data()) request_def = _create_good_request_def() request_def.headers['Connection'] = 'UPGRADE' request = _create_request(request_def) handshaker = _create_handshaker(request) handshaker.do_handshake() self.assertEqual( _EXPECTED_RESPONSE, request.connection.written_data()) def test_do_handshake_with_multiple_connection_values(self): request_def = _create_good_request_def() request_def.headers['Connection'] = 'Upgrade, keep-alive, , ' request = _create_request(request_def) handshaker = _create_handshaker(request) handshaker.do_handshake() self.assertEqual( _EXPECTED_RESPONSE, request.connection.written_data()) def test_aborting_handshake(self): handshaker = Handshaker( _create_request(_create_good_request_def()), AbortingDispatcher()) # do_extra_handshake raises an exception. Check that it's not caught by # do_handshake. 
self.assertRaises(HandshakeAbortedException, handshaker.do_handshake) def test_do_handshake_with_protocol(self): request_def = _create_good_request_def() request_def.headers['Sec-WebSocket-Protocol'] = 'chat, superchat' request = _create_request(request_def) handshaker = Handshaker(request, SubprotocolChoosingDispatcher(0)) handshaker.do_handshake() EXPECTED_RESPONSE = ( 'HTTP/1.1 101 Switching Protocols\r\n' 'Upgrade: websocket\r\n' 'Connection: Upgrade\r\n' 'Sec-WebSocket-Accept: s3pPLMBiTxaQ9kYGzzhZRbK+xOo=\r\n' 'Sec-WebSocket-Protocol: chat\r\n\r\n') self.assertEqual(EXPECTED_RESPONSE, request.connection.written_data()) self.assertEqual('chat', request.ws_protocol) def test_do_handshake_protocol_not_in_request_but_in_response(self): request_def = _create_good_request_def() request = _create_request(request_def) handshaker = Handshaker( request, SubprotocolChoosingDispatcher(-1, 'foobar')) # No request has been made but ws_protocol is set. HandshakeException # must be raised. self.assertRaises(HandshakeException, handshaker.do_handshake) def test_do_handshake_with_protocol_no_protocol_selection(self): request_def = _create_good_request_def() request_def.headers['Sec-WebSocket-Protocol'] = 'chat, superchat' request = _create_request(request_def) handshaker = _create_handshaker(request) # ws_protocol is not set. HandshakeException must be raised. self.assertRaises(HandshakeException, handshaker.do_handshake) def test_do_handshake_with_extensions(self): request_def = _create_good_request_def() request_def.headers['Sec-WebSocket-Extensions'] = ( 'permessage-compress; method=deflate, unknown') EXPECTED_RESPONSE = ( 'HTTP/1.1 101 Switching Protocols\r\n' 'Upgrade: websocket\r\n' 'Connection: Upgrade\r\n' 'Sec-WebSocket-Accept: s3pPLMBiTxaQ9kYGzzhZRbK+xOo=\r\n' 'Sec-WebSocket-Extensions: permessage-compress; method=deflate\r\n' '\r\n') request = _create_request(request_def) handshaker = _create_handshaker(request) handshaker.do_handshake() self.assertEqual(EXPECTED_RESPONSE, request.connection.written_data()) self.assertEqual(1, len(request.ws_extensions)) extension = request.ws_extensions[0] self.assertEqual(common.PERMESSAGE_COMPRESSION_EXTENSION, extension.name()) self.assertEqual(['method'], extension.get_parameter_names()) self.assertEqual('deflate', extension.get_parameter_value('method')) self.assertEqual(1, len(request.ws_extension_processors)) self.assertEqual(common.PERMESSAGE_COMPRESSION_EXTENSION, request.ws_extension_processors[0].name()) def test_do_handshake_with_permessage_compress(self): request_def = _create_good_request_def() request_def.headers['Sec-WebSocket-Extensions'] = ( 'permessage-compress; method=deflate') request = _create_request(request_def) handshaker = _create_handshaker(request) handshaker.do_handshake() self.assertEqual(1, len(request.ws_extensions)) self.assertEqual(common.PERMESSAGE_COMPRESSION_EXTENSION, request.ws_extensions[0].name()) self.assertEqual(1, len(request.ws_extension_processors)) self.assertEqual(common.PERMESSAGE_COMPRESSION_EXTENSION, request.ws_extension_processors[0].name()) def test_do_handshake_with_quoted_extensions(self): request_def = _create_good_request_def() request_def.headers['Sec-WebSocket-Extensions'] = ( 'permessage-compress; method=deflate, , ' 'unknown; e = "mc^2"; ma="\r\n \\\rf "; pv=nrt') request = _create_request(request_def) handshaker = _create_handshaker(request) handshaker.do_handshake() self.assertEqual(2, len(request.ws_requested_extensions)) first_extension = request.ws_requested_extensions[0] 
self.assertEqual('permessage-compress', first_extension.name()) self.assertEqual(['method'], first_extension.get_parameter_names()) self.assertEqual('deflate', first_extension.get_parameter_value('method')) second_extension = request.ws_requested_extensions[1] self.assertEqual('unknown', second_extension.name()) self.assertEqual( ['e', 'ma', 'pv'], second_extension.get_parameter_names()) self.assertEqual('mc^2', second_extension.get_parameter_value('e')) self.assertEqual(' \rf ', second_extension.get_parameter_value('ma')) self.assertEqual('nrt', second_extension.get_parameter_value('pv')) def test_do_handshake_with_optional_headers(self): request_def = _create_good_request_def() request_def.headers['EmptyValue'] = '' request_def.headers['AKey'] = 'AValue' request = _create_request(request_def) handshaker = _create_handshaker(request) handshaker.do_handshake() self.assertEqual( 'AValue', request.headers_in['AKey']) self.assertEqual( '', request.headers_in['EmptyValue']) def test_abort_extra_handshake(self): handshaker = Handshaker( _create_request(_create_good_request_def()), AbortedByUserDispatcher()) # do_extra_handshake raises an AbortedByUserException. Check that it's # not caught by do_handshake. self.assertRaises(AbortedByUserException, handshaker.do_handshake) def test_do_handshake_with_mux_and_deflate_frame(self): request_def = _create_good_request_def() request_def.headers['Sec-WebSocket-Extensions'] = ('%s, %s' % ( common.MUX_EXTENSION, common.DEFLATE_FRAME_EXTENSION)) request = _create_request(request_def) handshaker = _create_handshaker(request) handshaker.do_handshake() # mux should be rejected. self.assertEqual(1, len(request.ws_extensions)) self.assertEqual(common.DEFLATE_FRAME_EXTENSION, request.ws_extensions[0].name()) self.assertEqual(2, len(request.ws_extension_processors)) self.assertEqual(common.MUX_EXTENSION, request.ws_extension_processors[0].name()) self.assertEqual(common.DEFLATE_FRAME_EXTENSION, request.ws_extension_processors[1].name()) self.assertFalse(hasattr(request, 'mux_processor')) def test_do_handshake_with_deflate_frame_and_mux(self): request_def = _create_good_request_def() request_def.headers['Sec-WebSocket-Extensions'] = ('%s, %s' % ( common.DEFLATE_FRAME_EXTENSION, common.MUX_EXTENSION)) request = _create_request(request_def) handshaker = _create_handshaker(request) handshaker.do_handshake() # mux should be rejected. 
self.assertEqual(1, len(request.ws_extensions)) first_extension = request.ws_extensions[0] self.assertEqual(common.DEFLATE_FRAME_EXTENSION, first_extension.name()) self.assertEqual(2, len(request.ws_extension_processors)) self.assertEqual(common.DEFLATE_FRAME_EXTENSION, request.ws_extension_processors[0].name()) self.assertEqual(common.MUX_EXTENSION, request.ws_extension_processors[1].name()) self.assertFalse(hasattr(request, 'mux')) def test_do_handshake_with_permessage_compress_and_mux(self): request_def = _create_good_request_def() request_def.headers['Sec-WebSocket-Extensions'] = ( '%s; method=deflate, %s' % ( common.PERMESSAGE_COMPRESSION_EXTENSION, common.MUX_EXTENSION)) request = _create_request(request_def) handshaker = _create_handshaker(request) handshaker.do_handshake() self.assertEqual(1, len(request.ws_extensions)) self.assertEqual(common.MUX_EXTENSION, request.ws_extensions[0].name()) self.assertEqual(2, len(request.ws_extension_processors)) self.assertEqual(common.PERMESSAGE_COMPRESSION_EXTENSION, request.ws_extension_processors[0].name()) self.assertEqual(common.MUX_EXTENSION, request.ws_extension_processors[1].name()) self.assertTrue(hasattr(request, 'mux_processor')) self.assertTrue(request.mux_processor.is_active()) mux_extensions = request.mux_processor.extensions() self.assertEqual(1, len(mux_extensions)) self.assertEqual(common.PERMESSAGE_COMPRESSION_EXTENSION, mux_extensions[0].name()) def test_do_handshake_with_mux_and_permessage_compress(self): request_def = _create_good_request_def() request_def.headers['Sec-WebSocket-Extensions'] = ( '%s, %s; method=deflate' % ( common.MUX_EXTENSION, common.PERMESSAGE_COMPRESSION_EXTENSION)) request = _create_request(request_def) handshaker = _create_handshaker(request) handshaker.do_handshake() # mux should be rejected. 
self.assertEqual(1, len(request.ws_extensions)) first_extension = request.ws_extensions[0] self.assertEqual(common.PERMESSAGE_COMPRESSION_EXTENSION, first_extension.name()) self.assertEqual(2, len(request.ws_extension_processors)) self.assertEqual(common.MUX_EXTENSION, request.ws_extension_processors[0].name()) self.assertEqual(common.PERMESSAGE_COMPRESSION_EXTENSION, request.ws_extension_processors[1].name()) self.assertFalse(hasattr(request, 'mux_processor')) def test_bad_requests(self): bad_cases = [ ('HTTP request', RequestDefinition( 'GET', '/demo', {'Host': 'www.google.com', 'User-Agent': 'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.5;' ' en-US; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3' ' GTB6 GTBA', 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,' '*/*;q=0.8', 'Accept-Language': 'en-us,en;q=0.5', 'Accept-Encoding': 'gzip,deflate', 'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7', 'Keep-Alive': '300', 'Connection': 'keep-alive'}), None, True)] request_def = _create_good_request_def() request_def.method = 'POST' bad_cases.append(('Wrong method', request_def, None, True)) request_def = _create_good_request_def() del request_def.headers['Host'] bad_cases.append(('Missing Host', request_def, None, True)) request_def = _create_good_request_def() del request_def.headers['Upgrade'] bad_cases.append(('Missing Upgrade', request_def, None, True)) request_def = _create_good_request_def() request_def.headers['Upgrade'] = 'nonwebsocket' bad_cases.append(('Wrong Upgrade', request_def, None, True)) request_def = _create_good_request_def() del request_def.headers['Connection'] bad_cases.append(('Missing Connection', request_def, None, True)) request_def = _create_good_request_def() request_def.headers['Connection'] = 'Downgrade' bad_cases.append(('Wrong Connection', request_def, None, True)) request_def = _create_good_request_def() del request_def.headers['Sec-WebSocket-Key'] bad_cases.append(('Missing Sec-WebSocket-Key', request_def, 400, True)) request_def = _create_good_request_def() request_def.headers['Sec-WebSocket-Key'] = ( 'dGhlIHNhbXBsZSBub25jZQ==garbage') bad_cases.append(('Wrong Sec-WebSocket-Key (with garbage on the tail)', request_def, 400, True)) request_def = _create_good_request_def() request_def.headers['Sec-WebSocket-Key'] = 'YQ==' # BASE64 of 'a' bad_cases.append( ('Wrong Sec-WebSocket-Key (decoded value is not 16 octets long)', request_def, 400, True)) request_def = _create_good_request_def() # The last character right before == must be any of A, Q, w and g. 
request_def.headers['Sec-WebSocket-Key'] = ( 'AQIDBAUGBwgJCgsMDQ4PEC==') bad_cases.append( ('Wrong Sec-WebSocket-Key (padding bits are not zero)', request_def, 400, True)) request_def = _create_good_request_def() request_def.headers['Sec-WebSocket-Key'] = ( 'dGhlIHNhbXBsZSBub25jZQ==,dGhlIHNhbXBsZSBub25jZQ==') bad_cases.append( ('Wrong Sec-WebSocket-Key (multiple values)', request_def, 400, True)) request_def = _create_good_request_def() del request_def.headers['Sec-WebSocket-Version'] bad_cases.append(('Missing Sec-WebSocket-Version', request_def, None, True)) request_def = _create_good_request_def() request_def.headers['Sec-WebSocket-Version'] = '3' bad_cases.append(('Wrong Sec-WebSocket-Version', request_def, None, False)) request_def = _create_good_request_def() request_def.headers['Sec-WebSocket-Version'] = '13, 13' bad_cases.append(('Wrong Sec-WebSocket-Version (multiple values)', request_def, 400, True)) request_def = _create_good_request_def() request_def.headers['Sec-WebSocket-Protocol'] = 'illegal\x09protocol' bad_cases.append(('Illegal Sec-WebSocket-Protocol', request_def, 400, True)) request_def = _create_good_request_def() request_def.headers['Sec-WebSocket-Protocol'] = '' bad_cases.append(('Empty Sec-WebSocket-Protocol', request_def, 400, True)) for (case_name, request_def, expected_status, expect_handshake_exception) in bad_cases: request = _create_request(request_def) handshaker = Handshaker(request, mock.MockDispatcher()) try: handshaker.do_handshake() self.fail('No exception thrown for \'%s\' case' % case_name) except HandshakeException, e: self.assertTrue(expect_handshake_exception) self.assertEqual(expected_status, e.status) except VersionException, e: self.assertFalse(expect_handshake_exception) if __name__ == '__main__': unittest.main() # vi:sts=4 sw=4 et
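A minimal happy-path sketch (not part of the original test file) of the pattern these cases exercise; it reuses the module's own helpers (_create_good_request_def, _create_request, Handshaker, mock.MockDispatcher) and assumes they are in scope:

request_def = _create_good_request_def()
request_def.headers['Sec-WebSocket-Extensions'] = (
    '%s; method=deflate' % common.PERMESSAGE_COMPRESSION_EXTENSION)
request = _create_request(request_def)
handshaker = Handshaker(request, mock.MockDispatcher())
handshaker.do_handshake()  # raises HandshakeException on a malformed request
# On success, the negotiated extension processors are recorded on the request.
assert (request.ws_extension_processors[0].name() ==
        common.PERMESSAGE_COMPRESSION_EXTENSION)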
mpl-2.0
spanner888/madparts
coffee/library.py
1
1940
# (c) 2013 Joost Yervante Damad <[email protected]> # License: GPL import os, os.path, glob import coffee.pycoffee as pycoffee class Meta: def __init__(self, meta): if not 'desc' in meta: meta['desc'] = '' if not 'parent' in meta: meta['parent'] = None self.meta = meta for k in meta: self.__dict__[k] = meta[k] self.child_ids = [] class Library: def __init__(self, name, directory): self.name = name self.directory = directory self.exists = os.path.exists(self.directory) self.is_dir = True self.readonly = False if self.exists: self.is_dir = os.path.isdir(self.directory) self.readonly = not os.access(self.directory, os.W_OK) self.meta_list = [] self.fail_list = [] self.meta_by_id = {} self.scan() def scan(self, select_id = None): self.meta_list = [] self.fail_list = [] if not self.exists: return for path in glob.glob(self.directory + '/*.coffee'): with open(path) as f: code = f.read() meta = pycoffee.eval_coffee_meta(code) if not 'name' in meta or not 'id' in meta: self.fail_list.append(meta) continue meta['readonly'] = not os.access(path, os.W_OK) meta['filename'] = path self.meta_list.append(meta) self.meta_list = [Meta(meta) for meta in self.meta_list] self.meta_list.sort(key=lambda x: x.name) self.meta_by_id = {} for meta in self.meta_list: self.meta_by_id[meta.id] = meta self.meta_by_name = {} for meta in self.meta_list: self.meta_by_name[meta.name] = meta # scan child relationships found_as_child = [] for meta in self.meta_list: if meta.parent != None and meta.parent in self.meta_by_id: self.meta_by_id[meta.parent].child_ids.append(meta.id) found_as_child.append(meta.id) self.root_meta_list = filter(lambda meta: meta.id not in found_as_child, self.meta_list)
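A usage sketch (not part of library.py); the library name and directory are hypothetical:

# Scan a directory of .coffee footprints and walk the parent/child tree
# that Library.scan() builds from each file's 'parent' metadata field.
lib = Library('main', '/path/to/footprints')  # hypothetical path
if lib.exists and not lib.readonly:
    for meta in lib.root_meta_list:
        print meta.name, meta.id
        for child_id in meta.child_ids:
            print '  child:', lib.meta_by_id[child_id].name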
gpl-3.0
martynovp/edx-platform
cms/djangoapps/contentstore/features/course-updates.py
95
4707
# pylint: disable=missing-docstring # pylint: disable=redefined-outer-name from lettuce import world, step from selenium.webdriver.common.keys import Keys from common import type_in_codemirror, get_codemirror_value from nose.tools import assert_in # pylint: disable=no-name-in-module @step(u'I go to the course updates page') def go_to_updates(_step): menu_css = 'li.nav-course-courseware' updates_css = 'li.nav-course-courseware-updates a' world.css_click(menu_css) world.css_click(updates_css) @step(u'I add a new update with the text "([^"]*)"$') def add_update(_step, text): update_css = 'a.new-update-button' world.css_click(update_css) change_text(text) @step(u'I should see the update "([^"]*)"$') def check_update(_step, text): update_css = 'div.update-contents' update_html = world.css_find(update_css).html assert_in(text, update_html) @step(u'I should see the asset update to "([^"]*)"$') def check_asset_update(_step, asset_file): update_css = 'div.update-contents' update_html = world.css_find(update_css).html asset_key = world.scenario_dict['COURSE'].id.make_asset_key(asset_type='asset', path=asset_file) assert_in(unicode(asset_key), update_html) @step(u'I should not see the update "([^"]*)"$') def check_no_update(_step, text): update_css = 'div.update-contents' assert world.is_css_not_present(update_css) @step(u'I modify the text to "([^"]*)"$') def modify_update(_step, text): button_css = 'div.post-preview .edit-button' world.css_click(button_css) change_text(text) @step(u'I change the update from "([^"]*)" to "([^"]*)"$') def change_existing_update(_step, before, after): verify_text_in_editor_and_update('div.post-preview .edit-button', before, after) @step(u'I change the handout from "([^"]*)" to "([^"]*)"$') def change_existing_handout(_step, before, after): verify_text_in_editor_and_update('div.course-handouts .edit-button', before, after) @step(u'I delete the update$') def delete_update(_step): button_css = 'div.post-preview .delete-button' world.css_click(button_css) @step(u'I edit the date to "([^"]*)"$') def change_date(_step, new_date): button_css = 'div.post-preview .edit-button' world.css_click(button_css) date_css = 'input.date' date = world.css_find(date_css) for i in range(len(date.value)): date._element.send_keys(Keys.END, Keys.BACK_SPACE) date._element.send_keys(new_date) save_css = '.save-button' world.css_click(save_css) @step(u'I should see the date "([^"]*)"$') def check_date(_step, date): date_css = 'span.date-display' assert_in(date, world.css_html(date_css)) @step(u'I modify the handout to "([^"]*)"$') def edit_handouts(_step, text): edit_css = 'div.course-handouts > .edit-button' world.css_click(edit_css) change_text(text) @step(u'I see the handout "([^"]*)"$') def check_handout(_step, handout): handout_css = 'div.handouts-content' assert_in(handout, world.css_html(handout_css)) @step(u'I see the handout image link "([^"]*)"$') def check_handout_image_link(_step, image_file): handout_css = 'div.handouts-content' handout_html = world.css_html(handout_css) asset_key = world.scenario_dict['COURSE'].id.make_asset_key(asset_type='asset', path=image_file) assert_in(unicode(asset_key), handout_html) @step(u'I see the handout error text') def check_handout_error(_step): handout_error_css = 'div#handout_error' assert world.css_has_class(handout_error_css, 'is-shown') @step(u'I see handout save button disabled') def check_handout_save_button_disabled(_step): handout_save_button = 'form.edit-handouts-form .save-button' assert world.css_has_class(handout_save_button, 'is-disabled') @step(u'I 
edit the handout to "([^"]*)"$') def edit_handout_text(_step, text): type_in_codemirror(0, text) @step(u'I see handout save button re-enabled') def check_handout_save_button_reenabled(_step): handout_save_button = 'form.edit-handouts-form .save-button' assert not world.css_has_class(handout_save_button, 'is-disabled') @step(u'I save handout edit') def save_handout_edit(_step): save_css = '.save-button' world.css_click(save_css) def change_text(text): type_in_codemirror(0, text) save_css = '.save-button' world.css_click(save_css) def verify_text_in_editor_and_update(button_css, before, after): world.css_click(button_css) text = get_codemirror_value() assert_in(before, text) change_text(after) @step('I see a "(saving|deleting)" notification') def i_see_a_mini_notification(_step, _type): saving_css = '.wrapper-notification-mini' assert world.is_css_present(saving_css)
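For context, a sketch (not from the repository) of the kind of Gherkin scenario that would drive the step definitions above; it is shown as a Python comment because the bindings are purely regex-based:

# Scenario: Edit an existing course update
#     Given I go to the course updates page
#     When I add a new update with the text "Hello"
#     And I change the update from "Hello" to "Goodbye"
#     Then I should see the update "Goodbye"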
agpl-3.0
taedla01/MissionPlanner
Lib/unittest/util.py
60
4762
"""Various utility functions.""" from collections import namedtuple, OrderedDict __unittest = True _MAX_LENGTH = 80 def safe_repr(obj, short=False): try: result = repr(obj) except Exception: result = object.__repr__(obj) if not short or len(result) < _MAX_LENGTH: return result return result[:_MAX_LENGTH] + ' [truncated]...' def strclass(cls): return "%s.%s" % (cls.__module__, cls.__name__) def sorted_list_difference(expected, actual): """Finds elements in only one or the other of two, sorted input lists. Returns a two-element tuple of lists. The first list contains those elements in the "expected" list but not in the "actual" list, and the second contains those elements in the "actual" list but not in the "expected" list. Duplicate elements in either input list are ignored. """ i = j = 0 missing = [] unexpected = [] while True: try: e = expected[i] a = actual[j] if e < a: missing.append(e) i += 1 while expected[i] == e: i += 1 elif e > a: unexpected.append(a) j += 1 while actual[j] == a: j += 1 else: i += 1 try: while expected[i] == e: i += 1 finally: j += 1 while actual[j] == a: j += 1 except IndexError: missing.extend(expected[i:]) unexpected.extend(actual[j:]) break return missing, unexpected def unorderable_list_difference(expected, actual, ignore_duplicate=False): """Same behavior as sorted_list_difference but for lists of unorderable items (like dicts). As it does a linear search per item (remove) it has O(n*n) performance. """ missing = [] unexpected = [] while expected: item = expected.pop() try: actual.remove(item) except ValueError: missing.append(item) if ignore_duplicate: for lst in expected, actual: try: while True: lst.remove(item) except ValueError: pass if ignore_duplicate: while actual: item = actual.pop() unexpected.append(item) try: while True: actual.remove(item) except ValueError: pass return missing, unexpected # anything left in actual is unexpected return missing, actual _Mismatch = namedtuple('Mismatch', 'actual expected value') def _count_diff_all_purpose(actual, expected): 'Returns list of (cnt_act, cnt_exp, elem) triples where the counts differ' # elements need not be hashable s, t = list(actual), list(expected) m, n = len(s), len(t) NULL = object() result = [] for i, elem in enumerate(s): if elem is NULL: continue cnt_s = cnt_t = 0 for j in range(i, m): if s[j] == elem: cnt_s += 1 s[j] = NULL for j, other_elem in enumerate(t): if other_elem == elem: cnt_t += 1 t[j] = NULL if cnt_s != cnt_t: diff = _Mismatch(cnt_s, cnt_t, elem) result.append(diff) for i, elem in enumerate(t): if elem is NULL: continue cnt_t = 0 for j in range(i, n): if t[j] == elem: cnt_t += 1 t[j] = NULL diff = _Mismatch(0, cnt_t, elem) result.append(diff) return result def _ordered_count(iterable): 'Return dict of element counts, in the order they were first seen' c = OrderedDict() for elem in iterable: c[elem] = c.get(elem, 0) + 1 return c def _count_diff_hashable(actual, expected): 'Returns list of (cnt_act, cnt_exp, elem) triples where the counts differ' # elements must be hashable s, t = _ordered_count(actual), _ordered_count(expected) result = [] for elem, cnt_s in s.items(): cnt_t = t.get(elem, 0) if cnt_s != cnt_t: diff = _Mismatch(cnt_s, cnt_t, elem) result.append(diff) for elem, cnt_t in t.items(): if elem not in s: diff = _Mismatch(0, cnt_t, elem) result.append(diff) return result
gpl-3.0
aristotle-tek/cuny-bdif
AWS/ec2/lib/boto-2.34.0/boto/cloudfront/origin.py
153
6060
# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/ # Copyright (c) 2010, Eucalyptus Systems, Inc. # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, dis- # tribute, sublicense, and/or sell copies of the Software, and to permit # persons to whom the Software is furnished to do so, subject to the fol- # lowing conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. from boto.cloudfront.identity import OriginAccessIdentity def get_oai_value(origin_access_identity): if isinstance(origin_access_identity, OriginAccessIdentity): return origin_access_identity.uri() else: return origin_access_identity class S3Origin(object): """ Origin information to associate with the distribution. If your distribution will use an Amazon S3 origin, then you use the S3Origin element. """ def __init__(self, dns_name=None, origin_access_identity=None): """ :param dns_name: The DNS name of your Amazon S3 bucket to associate with the distribution. For example: mybucket.s3.amazonaws.com. :type dns_name: str :param origin_access_identity: The CloudFront origin access identity to associate with the distribution. If you want the distribution to serve private content, include this element; if you want the distribution to serve public content, remove this element. :type origin_access_identity: str """ self.dns_name = dns_name self.origin_access_identity = origin_access_identity def __repr__(self): return '<S3Origin: %s>' % self.dns_name def startElement(self, name, attrs, connection): return None def endElement(self, name, value, connection): if name == 'DNSName': self.dns_name = value elif name == 'OriginAccessIdentity': self.origin_access_identity = value else: setattr(self, name, value) def to_xml(self): s = ' <S3Origin>\n' s += ' <DNSName>%s</DNSName>\n' % self.dns_name if self.origin_access_identity: val = get_oai_value(self.origin_access_identity) s += ' <OriginAccessIdentity>%s</OriginAccessIdentity>\n' % val s += ' </S3Origin>\n' return s class CustomOrigin(object): """ Origin information to associate with the distribution. If your distribution will use a non-Amazon S3 origin, then you use the CustomOrigin element. """ def __init__(self, dns_name=None, http_port=80, https_port=443, origin_protocol_policy=None): """ :param dns_name: The DNS name of your custom origin server to associate with the distribution. For example: www.example.com. :type dns_name: str :param http_port: The HTTP port the custom origin listens on. :type http_port: int :param https_port: The HTTPS port the custom origin listens on. :type https_port: int :param origin_protocol_policy: The origin protocol policy to apply to your origin. If you specify http-only, CloudFront will use HTTP only to access the origin. 
If you specify match-viewer, CloudFront will fetch from your origin using HTTP or HTTPS, based on the protocol of the viewer request. :type origin_protocol_policy: str """ self.dns_name = dns_name self.http_port = http_port self.https_port = https_port self.origin_protocol_policy = origin_protocol_policy def __repr__(self): return '<CustomOrigin: %s>' % self.dns_name def startElement(self, name, attrs, connection): return None def endElement(self, name, value, connection): if name == 'DNSName': self.dns_name = value elif name == 'HTTPPort': try: self.http_port = int(value) except ValueError: self.http_port = value elif name == 'HTTPSPort': try: self.https_port = int(value) except ValueError: self.https_port = value elif name == 'OriginProtocolPolicy': self.origin_protocol_policy = value else: setattr(self, name, value) def to_xml(self): s = ' <CustomOrigin>\n' s += ' <DNSName>%s</DNSName>\n' % self.dns_name s += ' <HTTPPort>%d</HTTPPort>\n' % self.http_port s += ' <HTTPSPort>%d</HTTPSPort>\n' % self.https_port s += ' <OriginProtocolPolicy>%s</OriginProtocolPolicy>\n' % self.origin_protocol_policy s += ' </CustomOrigin>\n' return s
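A short usage sketch (not in the original module); the DNS names are placeholders:

s3 = S3Origin(dns_name='mybucket.s3.amazonaws.com')
print s3.to_xml()   # renders the <S3Origin> fragment CloudFront expects

custom = CustomOrigin(dns_name='origin.example.com',
                      origin_protocol_policy='match-viewer')
print custom.to_xml()  # renders <CustomOrigin> with the port defaults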
mit
xuxiao19910803/edx-platform
cms/djangoapps/contentstore/tests/test_clone_course.py
147
6812
""" Unit tests for cloning a course between the same and different module stores. """ import json from django.conf import settings from opaque_keys.edx.locator import CourseLocator from xmodule.modulestore import ModuleStoreEnum, EdxJSONEncoder from contentstore.tests.utils import CourseTestCase from contentstore.tasks import rerun_course from student.auth import has_course_author_access from course_action_state.models import CourseRerunState from course_action_state.managers import CourseRerunUIStateManager from mock import patch, Mock from xmodule.contentstore.content import StaticContent from xmodule.contentstore.django import contentstore from xmodule.modulestore.tests.factories import CourseFactory TEST_DATA_DIR = settings.COMMON_TEST_DATA_ROOT class CloneCourseTest(CourseTestCase): """ Unit tests for cloning a course """ def test_clone_course(self): """Tests cloning of a course as follows: XML -> Mongo (+ data) -> Mongo -> Split -> Split""" # 1. import and populate test toy course mongo_course1_id = self.import_and_populate_course() # 2. clone course (mongo -> mongo) # TODO - This is currently failing since clone_course doesn't handle Private content - fails on Publish # mongo_course2_id = SlashSeparatedCourseKey('edX2', 'toy2', '2013_Fall') # self.store.clone_course(mongo_course1_id, mongo_course2_id, self.user.id) # self.assertCoursesEqual(mongo_course1_id, mongo_course2_id) # self.check_populated_course(mongo_course2_id) # NOTE: When the code above is uncommented this can be removed. mongo_course2_id = mongo_course1_id # 3. clone course (mongo -> split) with self.store.default_store(ModuleStoreEnum.Type.split): split_course3_id = CourseLocator( org="edx3", course="split3", run="2013_Fall" ) self.store.clone_course(mongo_course2_id, split_course3_id, self.user.id) self.assertCoursesEqual(mongo_course2_id, split_course3_id) # 4. clone course (split -> split) split_course4_id = CourseLocator( org="edx4", course="split4", run="2013_Fall" ) self.store.clone_course(split_course3_id, split_course4_id, self.user.id) self.assertCoursesEqual(split_course3_id, split_course4_id) def test_space_in_asset_name_for_rerun_course(self): """ Tests check the scenario where one course which has an asset with percentage(%) in its name, it should re-run successfully. 
""" org = 'edX' course_number = 'CS101' course_run = '2015_Q1' display_name = 'rerun' fields = {'display_name': display_name} course_assets = set([u'subs_Introduction%20To%20New.srt.sjson'], ) # Create a course using split modulestore course = CourseFactory.create( org=org, number=course_number, run=course_run, display_name=display_name, default_store=ModuleStoreEnum.Type.split ) # add an asset asset_key = course.id.make_asset_key('asset', 'subs_Introduction%20To%20New.srt.sjson') content = StaticContent( asset_key, 'Dummy assert', 'application/json', 'dummy data', ) contentstore().save(content) # Get & verify all assets of the course assets, count = contentstore().get_all_content_for_course(course.id) self.assertEqual(count, 1) self.assertEqual(set([asset['asset_key'].block_id for asset in assets]), course_assets) # rerun from split into split split_rerun_id = CourseLocator(org=org, course=course_number, run="2012_Q2") CourseRerunState.objects.initiated(course.id, split_rerun_id, self.user, fields['display_name']) result = rerun_course.delay( unicode(course.id), unicode(split_rerun_id), self.user.id, json.dumps(fields, cls=EdxJSONEncoder) ) # Check if re-run was successful self.assertEqual(result.get(), "succeeded") rerun_state = CourseRerunState.objects.find_first(course_key=split_rerun_id) self.assertEqual(rerun_state.state, CourseRerunUIStateManager.State.SUCCEEDED) def test_rerun_course(self): """ Unit tests for :meth: `contentstore.tasks.rerun_course` """ mongo_course1_id = self.import_and_populate_course() # rerun from mongo into split split_course3_id = CourseLocator( org="edx3", course="split3", run="rerun_test" ) # Mark the action as initiated fields = {'display_name': 'rerun'} CourseRerunState.objects.initiated(mongo_course1_id, split_course3_id, self.user, fields['display_name']) result = rerun_course.delay(unicode(mongo_course1_id), unicode(split_course3_id), self.user.id, json.dumps(fields, cls=EdxJSONEncoder)) self.assertEqual(result.get(), "succeeded") self.assertTrue(has_course_author_access(self.user, split_course3_id), "Didn't grant access") rerun_state = CourseRerunState.objects.find_first(course_key=split_course3_id) self.assertEqual(rerun_state.state, CourseRerunUIStateManager.State.SUCCEEDED) # try creating rerunning again to same name and ensure it generates error result = rerun_course.delay(unicode(mongo_course1_id), unicode(split_course3_id), self.user.id) self.assertEqual(result.get(), "duplicate course") # the below will raise an exception if the record doesn't exist CourseRerunState.objects.find_first( course_key=split_course3_id, state=CourseRerunUIStateManager.State.FAILED ) # try to hit the generic exception catch with patch('xmodule.modulestore.split_mongo.mongo_connection.MongoConnection.insert_course_index', Mock(side_effect=Exception)): split_course4_id = CourseLocator(org="edx3", course="split3", run="rerun_fail") fields = {'display_name': 'total failure'} CourseRerunState.objects.initiated(split_course3_id, split_course4_id, self.user, fields['display_name']) result = rerun_course.delay(unicode(split_course3_id), unicode(split_course4_id), self.user.id, json.dumps(fields, cls=EdxJSONEncoder)) self.assertIn("exception: ", result.get()) self.assertIsNone(self.store.get_course(split_course4_id), "Didn't delete course after error") CourseRerunState.objects.find_first( course_key=split_course4_id, state=CourseRerunUIStateManager.State.FAILED )
agpl-3.0
mephizzle/wagtail
wagtail/wagtailadmin/tests/test_account_management.py
25
19261
from __future__ import unicode_literals from django.test import TestCase from django.core.urlresolvers import reverse from django.contrib.auth import get_user_model from django.contrib.auth.models import Group, Permission from django.contrib.auth.tokens import PasswordResetTokenGenerator from django.core import mail from wagtail.tests.utils import WagtailTestUtils from wagtail.wagtailusers.models import UserProfile class TestAuthentication(TestCase, WagtailTestUtils): """ This tests that users can log in and log out of the admin interface """ def test_login_view(self): """ This tests that the login view responds with a login page """ # Get login page response = self.client.get(reverse('wagtailadmin_login')) # Check that the user received a login page self.assertEqual(response.status_code, 200) self.assertTemplateUsed(response, 'wagtailadmin/login.html') def test_login_view_post(self): """ This posts user credentials to the login view and checks that the user was logged in successfully """ # Create user to log in with get_user_model().objects.create_superuser(username='test', email='[email protected]', password='password') # Post credentials to the login page response = self.client.post(reverse('wagtailadmin_login'), { 'username': 'test', 'password': 'password', # NOTE: This is set using a hidden field in reality 'next': reverse('wagtailadmin_home'), }) # Check that the user was redirected to the dashboard self.assertRedirects(response, reverse('wagtailadmin_home')) # Check that the user was logged in self.assertTrue('_auth_user_id' in self.client.session) self.assertEqual(str(self.client.session['_auth_user_id']), str(get_user_model().objects.get(username='test').id)) def test_already_logged_in_redirect(self): """ This tests that a user who is already logged in is automatically redirected to the admin dashboard if they try to access the login page """ # Login self.login() # Get login page response = self.client.get(reverse('wagtailadmin_login')) # Check that the user was redirected to the dashboard self.assertRedirects(response, reverse('wagtailadmin_home')) def test_logged_in_as_non_privileged_user_doesnt_redirect(self): """ This tests that if the user is logged in but hasn't got permission to access the admin, they are not redirected to the admin This tests issue #431 """ # Login as unprivileged user get_user_model().objects.create(username='unprivileged', password='123') self.client.login(username='unprivileged', password='123') # Get login page response = self.client.get(reverse('wagtailadmin_login')) # Check that the user received a login page and was not redirected self.assertEqual(response.status_code, 200) self.assertTemplateUsed(response, 'wagtailadmin/login.html') def test_logout(self): """ This tests that the user can log out """ # Login self.login() # Get logout page response = self.client.get(reverse('wagtailadmin_logout')) # Check that the user was redirected to the login page self.assertRedirects(response, reverse('wagtailadmin_login')) # Check that the user was logged out self.assertFalse('_auth_user_id' in self.client.session) def test_not_logged_in_redirect(self): """ This tests that a user who is not logged in is redirected to the login page """ # Get dashboard response = self.client.get(reverse('wagtailadmin_home')) # Check that the user was redirected to the login page and that next was set correctly self.assertRedirects(response, reverse('wagtailadmin_login') + '?next=' + reverse('wagtailadmin_home')) def test_not_logged_in_redirect_default_settings(self): """ This does the 
same as the above test but checks that it redirects to the correct place when the user has not set the LOGIN_URL setting correctly """ # Get dashboard with default LOGIN_URL setting with self.settings(LOGIN_URL='django.contrib.auth.views.login'): response = self.client.get(reverse('wagtailadmin_home')) # Check that the user was redirected to the login page and that next was set correctly # Note: The user will be redirected to 'django.contrib.auth.views.login' but # this must be the same URL as 'wagtailadmin_login' self.assertEqual(response.status_code, 302) self.assertRedirects(response, reverse('wagtailadmin_login') + '?next=' + reverse('wagtailadmin_home')) class TestAccountSection(TestCase, WagtailTestUtils): """ This tests that the accounts section is working """ def setUp(self): self.login() def test_account_view(self): """ This tests that the account view responds with an account page """ # Get account page response = self.client.get(reverse('wagtailadmin_account')) # Check that the user received an account page self.assertEqual(response.status_code, 200) self.assertTemplateUsed(response, 'wagtailadmin/account/account.html') def test_change_password_view(self): """ This tests that the change password view responds with a change password page """ # Get change password page response = self.client.get(reverse('wagtailadmin_account_change_password')) # Check that the user received a change password page self.assertEqual(response.status_code, 200) self.assertTemplateUsed(response, 'wagtailadmin/account/change_password.html') def test_change_password_view_post(self): """ This posts a new password to the change password view and checks that the user's password was changed """ # Post new password to change password page post_data = { 'new_password1': 'newpassword', 'new_password2': 'newpassword', } response = self.client.post(reverse('wagtailadmin_account_change_password'), post_data) # Check that the user was redirected to the account page self.assertRedirects(response, reverse('wagtailadmin_account')) # Check that the password was changed self.assertTrue(get_user_model().objects.get(username='test').check_password('newpassword')) def test_change_password_view_post_password_mismatch(self): """ This posts two passwords that don't match to the password change view and checks that a validation error was raised """ # Post new password to change password page post_data = { 'new_password1': 'newpassword', 'new_password2': 'badpassword', } response = self.client.post(reverse('wagtailadmin_account_change_password'), post_data) # Check that the user wasn't redirected self.assertEqual(response.status_code, 200) # Check that a validation error was raised self.assertTrue('new_password2' in response.context['form'].errors.keys()) self.assertTrue("The two password fields didn't match." 
in response.context['form'].errors['new_password2']) # Check that the password was not changed self.assertTrue(get_user_model().objects.get(username='test').check_password('password')) def test_notification_preferences_view(self): """ This tests that the notification preferences view responds with the notification preferences page """ # Get notification preferences page response = self.client.get(reverse('wagtailadmin_account_notification_preferences')) # Check that the user received a notification preferences page self.assertEqual(response.status_code, 200) self.assertTemplateUsed(response, 'wagtailadmin/account/notification_preferences.html') def test_notification_preferences_view_post(self): """ This posts to the notification preferences view and checks that the user's profile is updated """ # Post new values to the notification preferences page post_data = { 'submitted_notifications': 'false', 'approved_notifications': 'false', 'rejected_notifications': 'true', } response = self.client.post(reverse('wagtailadmin_account_notification_preferences'), post_data) # Check that the user was redirected to the account page self.assertRedirects(response, reverse('wagtailadmin_account')) profile = UserProfile.get_for_user(get_user_model().objects.get(username='test')) # Check that the notification preferences are as submitted self.assertFalse(profile.submitted_notifications) self.assertFalse(profile.approved_notifications) self.assertTrue(profile.rejected_notifications) class TestAccountManagementForNonModerator(TestCase, WagtailTestUtils): """ Tests of reduced functionality for editors """ def setUp(self): # Create a non-moderator user self.submitter = get_user_model().objects.create_user('submitter', '[email protected]', 'password') self.submitter.groups.add(Group.objects.get(name='Editors')) self.client.login(username=self.submitter.username, password='password') def test_notification_preferences_form_is_reduced_for_non_moderators(self): """ This tests that a user without publish permissions is not shown the notification preference for 'submitted' items """ response = self.client.get(reverse('wagtailadmin_account_notification_preferences')) self.assertIn('approved_notifications', response.context['form'].fields.keys()) self.assertIn('rejected_notifications', response.context['form'].fields.keys()) self.assertNotIn('submitted_notifications', response.context['form'].fields.keys()) class TestAccountManagementForAdminOnlyUser(TestCase, WagtailTestUtils): """ Tests for users with no edit/publish permissions at all """ def setUp(self): # Create an admin-only user admin_only_group = Group.objects.create(name='Admin Only') admin_only_group.permissions.add(Permission.objects.get(codename='access_admin')) self.admin_only_user = get_user_model().objects.create_user('admin_only_user', '[email protected]', 'password') self.admin_only_user.groups.add(admin_only_group) self.client.login(username=self.admin_only_user.username, password='password') def test_notification_preferences_view_redirects_for_admin_only_users(self): """ Test that the user is not shown the notification preferences view but instead redirected to the account page """ response = self.client.get(reverse('wagtailadmin_account_notification_preferences')) self.assertRedirects(response, reverse('wagtailadmin_account')) def test_notification_preferences_link_not_shown_for_admin_only_users(self): """ Test that the user is not even shown the link to the notification preferences view """ response = 
self.client.get(reverse('wagtailadmin_account')) self.assertEqual(response.context['show_notification_preferences'], False) self.assertNotContains(response, reverse('wagtailadmin_account_notification_preferences')) # safety check that checking for absence/presence of urls works self.assertContains(response, reverse('wagtailadmin_home')) class TestPasswordReset(TestCase, WagtailTestUtils): """ This tests that the password reset is working """ def setUp(self): # Create a user get_user_model().objects.create_superuser(username='test', email='[email protected]', password='password') def test_password_reset_view(self): """ This tests that the password reset view returns a password reset page """ # Get password reset page response = self.client.get(reverse('wagtailadmin_password_reset')) # Check that the user received a password reset page self.assertEqual(response.status_code, 200) self.assertTemplateUsed(response, 'wagtailadmin/account/password_reset/form.html') def test_password_reset_view_post(self): """ This posts an email address to the password reset view and checks that a password reset email was sent """ # Post email address to password reset view post_data = { 'email': '[email protected]', } response = self.client.post(reverse('wagtailadmin_password_reset'), post_data) # Check that the user was redirected to the done page self.assertRedirects(response, reverse('wagtailadmin_password_reset_done')) # Check that a password reset email was sent to the user self.assertEqual(len(mail.outbox), 1) self.assertEqual(mail.outbox[0].to, ['[email protected]']) self.assertEqual(mail.outbox[0].subject, "Password reset") def test_password_reset_view_post_unknown_email(self): """ This posts an unknown email address to the password reset view and checks that the password reset form raises a validation error """ post_data = { 'email': '[email protected]', } response = self.client.post(reverse('wagtailadmin_password_reset'), post_data) # Check that the user wasn't redirected self.assertEqual(response.status_code, 200) # Check that a validation error was raised self.assertTrue('__all__' in response.context['form'].errors.keys()) self.assertTrue("This email address is not recognised." in response.context['form'].errors['__all__']) # Check that an email was not sent self.assertEqual(len(mail.outbox), 0) def test_password_reset_view_post_invalid_email(self): """ This posts an invalid email address to the password reset view and checks that the password reset form raises a validation error """ post_data = { 'email': 'Hello world!', } response = self.client.post(reverse('wagtailadmin_password_reset'), post_data) # Check that the user wasn't redirected self.assertEqual(response.status_code, 200) # Check that a validation error was raised self.assertTrue('email' in response.context['form'].errors.keys()) self.assertTrue("Enter a valid email address." 
in response.context['form'].errors['email']) # Check that an email was not sent self.assertEqual(len(mail.outbox), 0) def setup_password_reset_confirm_tests(self): from django.utils.encoding import force_bytes from django.utils.http import urlsafe_base64_encode # Get user self.user = get_user_model().objects.get(username='test') # Generate a password reset token self.password_reset_token = PasswordResetTokenGenerator().make_token(self.user) # Generate a password reset uid self.password_reset_uid = urlsafe_base64_encode(force_bytes(self.user.pk)) # Create url_args self.url_kwargs = dict(uidb64=self.password_reset_uid, token=self.password_reset_token) def test_password_reset_confirm_view(self): """ This tests that the password reset confirm view returns a password reset confirm page """ self.setup_password_reset_confirm_tests() # Get password reset confirm page response = self.client.get(reverse('wagtailadmin_password_reset_confirm', kwargs=self.url_kwargs)) # Check that the user received a password reset confirm page self.assertEqual(response.status_code, 200) self.assertTemplateUsed(response, 'wagtailadmin/account/password_reset/confirm.html') def test_password_reset_confirm_view_post(self): """ This posts a new password to the password reset confirm view and checks that the user's password was changed """ self.setup_password_reset_confirm_tests() # Post new password to change password page post_data = { 'new_password1': 'newpassword', 'new_password2': 'newpassword', } response = self.client.post(reverse('wagtailadmin_password_reset_confirm', kwargs=self.url_kwargs), post_data) # Check that the user was redirected to the complete page self.assertRedirects(response, reverse('wagtailadmin_password_reset_complete')) # Check that the password was changed self.assertTrue(get_user_model().objects.get(username='test').check_password('newpassword')) def test_password_reset_confirm_view_post_password_mismatch(self): """ This posts two passwords that don't match to the password reset confirm view and checks that a validation error was raised """ self.setup_password_reset_confirm_tests() # Post new password to change password page post_data = { 'new_password1': 'newpassword', 'new_password2': 'badpassword', } response = self.client.post(reverse('wagtailadmin_password_reset_confirm', kwargs=self.url_kwargs), post_data) # Check that the user wasn't redirected self.assertEqual(response.status_code, 200) # Check that a validation error was raised self.assertTrue('new_password2' in response.context['form'].errors.keys()) self.assertTrue("The two password fields didn't match." 
in response.context['form'].errors['new_password2']) # Check that the password was not changed self.assertTrue(get_user_model().objects.get(username='test').check_password('password')) def test_password_reset_done_view(self): """ This tests that the password reset done view returns a password reset done page """ # Get password reset done page response = self.client.get(reverse('wagtailadmin_password_reset_done')) # Check that the user received a password reset done page self.assertEqual(response.status_code, 200) self.assertTemplateUsed(response, 'wagtailadmin/account/password_reset/done.html') def test_password_reset_complete_view(self): """ This tests that the password reset complete view returns a password reset complete page """ # Get password reset complete page response = self.client.get(reverse('wagtailadmin_password_reset_complete')) # Check that the user received a password reset complete page self.assertEqual(response.status_code, 200) self.assertTemplateUsed(response, 'wagtailadmin/account/password_reset/complete.html')
bsd-3-clause
amisrs/angular-flask
angular_flask/lib/python2.7/site-packages/requests/packages/urllib3/_collections.py
68
2903
# urllib3/_collections.py # Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt) # # This module is part of urllib3 and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php from collections import MutableMapping from threading import Lock try: # Python 2.7+ from collections import OrderedDict except ImportError: from .packages.ordered_dict import OrderedDict __all__ = ['RecentlyUsedContainer'] _Null = object() class RecentlyUsedContainer(MutableMapping): """ Provides a thread-safe dict-like container which maintains up to ``maxsize`` keys while throwing away the least-recently-used keys beyond ``maxsize``. :param maxsize: Maximum number of recent elements to retain. :param dispose_func: Callback invoked every time an item is evicted from the container, as ``dispose_func(value)``. """ ContainerCls = OrderedDict def __init__(self, maxsize=10, dispose_func=None): self._maxsize = maxsize self.dispose_func = dispose_func self._container = self.ContainerCls() self._lock = Lock() def __getitem__(self, key): # Re-insert the item, moving it to the end of the eviction line. with self._lock: item = self._container.pop(key) self._container[key] = item return item def __setitem__(self, key, value): evicted_value = _Null with self._lock: # Possibly evict the existing value of 'key' evicted_value = self._container.get(key, _Null) self._container[key] = value # If we didn't evict an existing value, we might have to evict the # least recently used item from the beginning of the container. if len(self._container) > self._maxsize: _key, evicted_value = self._container.popitem(last=False) if self.dispose_func and evicted_value is not _Null: self.dispose_func(evicted_value) def __delitem__(self, key): with self._lock: value = self._container.pop(key) if self.dispose_func: self.dispose_func(value) def __len__(self): with self._lock: return len(self._container) def __iter__(self): raise NotImplementedError('Iteration over this class is unlikely to be threadsafe.') def clear(self): with self._lock: # Copy pointers to all values, then wipe the mapping # under Python 2, this copies the list of values twice :-| values = list(self._container.values()) self._container.clear() if self.dispose_func: for value in values: self.dispose_func(value) def keys(self): with self._lock: return self._container.keys()
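A behaviour sketch (not part of the module) showing the recency ordering and the dispose callback:

evicted = []
container = RecentlyUsedContainer(maxsize=2, dispose_func=evicted.append)
container['a'] = 1
container['b'] = 2
container['a']        # touching 'a' makes 'b' the least recently used
container['c'] = 3    # exceeds maxsize, so 'b' is evicted and disposed
assert evicted == [2]
assert sorted(container.keys()) == ['a', 'c']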
mit
vrenaville/project-service
project_stage_state/project.py
3
1221
# -*- coding: utf-8 -*- ############################################################################## # # Copyright (C) 2014 Daniel Reis # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp import models, fields _TASK_STATE = [ ('draft', 'New'), ('open', 'In Progress'), ('pending', 'Pending'), ('done', 'Done'), ('cancelled', 'Cancelled')] class ProjectTaskType(models.Model): _inherit = 'project.task.type' state = fields.Selection(_TASK_STATE, 'State')
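A sketch (hypothetical, written in the Odoo 8 new-API style the module imports) of what the extension adds, namely a kanban-style state on each stage record:

stage = env['project.task.type'].create({
    'name': 'QA review',   # hypothetical stage name
    'state': 'pending',    # any key from _TASK_STATE
})
assert stage.state == 'pending'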
agpl-3.0
rmoorman/feedhq
feedhq/storage.py
1
2371
import tempfile import os import errno from django.conf import settings from django.core.files import locks from django.core.files.move import file_move_safe from django.utils.text import get_valid_filename from django.core.files.storage import FileSystemStorage class OverwritingStorage(FileSystemStorage): """ File storage that allows overwriting of stored files. """ def get_available_name(self, name, max_length=None): return name def _save(self, name, content): """ Lifted partially from django/core/files/storage.py """ full_path = self.path(name) directory = os.path.dirname(full_path) if not os.path.exists(directory): try: os.makedirs(directory) except OSError as e: if e.errno != errno.EEXIST: raise if not os.path.isdir(directory): raise IOError("%s exists and is not a directory." % directory) # This file has a file path that we can move. if hasattr(content, 'temporary_file_path'): temp_data_location = content.temporary_file_path() else: tmp_prefix = "tmp_%s" % (get_valid_filename(name), ) temp_data_location = tempfile.mktemp(prefix=tmp_prefix, dir=self.location) try: # This is a normal uploadedfile that we can stream. # This fun binary flag incantation makes os.open throw an # OSError if the file already exists before we open it. fd = os.open(temp_data_location, os.O_WRONLY | os.O_CREAT | os.O_EXCL | getattr(os, 'O_BINARY', 0)) locks.lock(fd, locks.LOCK_EX) for chunk in content.chunks(): os.write(fd, chunk) locks.unlock(fd) os.close(fd) except Exception: if os.path.exists(temp_data_location): os.remove(temp_data_location) raise file_move_safe(temp_data_location, full_path, allow_overwrite=True) content.close() if settings.FILE_UPLOAD_PERMISSIONS is not None: os.chmod(full_path, settings.FILE_UPLOAD_PERMISSIONS) return name
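A usage sketch (not in the module) of the overwrite behaviour; the default FileSystemStorage would instead rename the second upload:

from django.core.files.base import ContentFile

storage = OverwritingStorage()
storage.save('avatars/user.png', ContentFile(b'v1'))
storage.save('avatars/user.png', ContentFile(b'v2'))  # same name, new bytes
assert storage.open('avatars/user.png').read() == b'v2'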
bsd-3-clause
PhenomX1998/FRACTALX-OP3
scripts/build-all.py
162
14627
#! /usr/bin/env python2 # Copyright (c) 2009-2015, The Linux Foundation. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of The Linux Foundation nor # the names of its contributors may be used to endorse or promote # products derived from this software without specific prior written # permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; # OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR # OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF # ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # Build the kernel for all targets using the Android build environment. from collections import namedtuple import errno import glob from optparse import OptionParser import os import re import shutil import subprocess import sys import threading import Queue version = 'build-all.py, version 1.99' build_dir = '../all-kernels' make_command = ["vmlinux", "modules", "dtbs"] all_options = {} compile64 = os.environ.get('CROSS_COMPILE64') def error(msg): sys.stderr.write("error: %s\n" % msg) def fail(msg): """Fail with a user-printed message""" error(msg) sys.exit(1) if not os.environ.get('CROSS_COMPILE'): fail("CROSS_COMPILE must be set in the environment") def check_kernel(): """Ensure that PWD is a kernel directory""" if (not os.path.isfile('MAINTAINERS') or not os.path.isfile('arch/arm/mach-msm/Kconfig')): fail("This doesn't seem to be an MSM kernel dir") def check_build(): """Ensure that the build directory is present.""" if not os.path.isdir(build_dir): try: os.makedirs(build_dir) except OSError as exc: if exc.errno == errno.EEXIST: pass else: raise failed_targets = [] BuildResult = namedtuple('BuildResult', ['status', 'messages']) class BuildSequence(namedtuple('BuildSequence', ['log_name', 'short_name', 'steps'])): def set_width(self, width): self.width = width def __enter__(self): self.log = open(self.log_name, 'w') def __exit__(self, type, value, traceback): self.log.close() def run(self): self.status = None messages = ["Building: " + self.short_name] def printer(line): text = "[%-*s] %s" % (self.width, self.short_name, line) messages.append(text) self.log.write(text) self.log.write('\n') for step in self.steps: st = step.run(printer) if st: self.status = BuildResult(self.short_name, messages) break if not self.status: self.status = BuildResult(None, messages) class BuildTracker: """Manages all of the steps necessary to perform a build. The build consists of one or more sequences of steps. 
The different sequences can be processed independently, while the steps within a sequence must be done in order.""" def __init__(self, parallel_builds): self.sequence = [] self.lock = threading.Lock() self.parallel_builds = parallel_builds def add_sequence(self, log_name, short_name, steps): self.sequence.append(BuildSequence(log_name, short_name, steps)) def longest_name(self): longest = 0 for seq in self.sequence: longest = max(longest, len(seq.short_name)) return longest def __repr__(self): return "BuildTracker(%s)" % self.sequence def run_child(self, seq): seq.set_width(self.longest) tok = self.build_tokens.get() with self.lock: print "Building:", seq.short_name with seq: seq.run() self.results.put(seq.status) self.build_tokens.put(tok) def run(self): self.longest = self.longest_name() self.results = Queue.Queue() children = [] errors = [] self.build_tokens = Queue.Queue() nthreads = self.parallel_builds print "Building with", nthreads, "threads" for i in range(nthreads): self.build_tokens.put(True) for seq in self.sequence: child = threading.Thread(target=self.run_child, args=[seq]) children.append(child) child.start() for child in children: stats = self.results.get() if all_options.verbose: with self.lock: for line in stats.messages: print line sys.stdout.flush() if stats.status: errors.append(stats.status) for child in children: child.join() if errors: fail("\n ".join(["Failed targets:"] + errors)) class PrintStep: """A step that just prints a message""" def __init__(self, message): self.message = message def run(self, outp): outp(self.message) class MkdirStep: """A step that makes a directory""" def __init__(self, direc): self.direc = direc def run(self, outp): outp("mkdir %s" % self.direc) os.mkdir(self.direc) class RmtreeStep: def __init__(self, direc): self.direc = direc def run(self, outp): outp("rmtree %s" % self.direc) shutil.rmtree(self.direc, ignore_errors=True) class CopyfileStep: def __init__(self, src, dest): self.src = src self.dest = dest def run(self, outp): outp("cp %s %s" % (self.src, self.dest)) shutil.copyfile(self.src, self.dest) class ExecStep: def __init__(self, cmd, **kwargs): self.cmd = cmd self.kwargs = kwargs def run(self, outp): outp("exec: %s" % (" ".join(self.cmd),)) with open('/dev/null', 'r') as devnull: proc = subprocess.Popen(self.cmd, stdin=devnull, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, **self.kwargs) stdout = proc.stdout while True: line = stdout.readline() if not line: break line = line.rstrip('\n') outp(line) result = proc.wait() if result != 0: return ('error', result) else: return None class Builder(): def __init__(self, name, defconfig): self.name = name self.defconfig = defconfig self.confname = self.defconfig.split('/')[-1] # Determine if this is a 64-bit target based on the location # of the defconfig. 
self.make_env = os.environ.copy() if "/arm64/" in defconfig: if compile64: self.make_env['CROSS_COMPILE'] = compile64 else: fail("Attempting to build 64-bit, without setting CROSS_COMPILE64") self.make_env['ARCH'] = 'arm64' else: self.make_env['ARCH'] = 'arm' self.make_env['KCONFIG_NOTIMESTAMP'] = 'true' self.log_name = "%s/log-%s.log" % (build_dir, self.name) def build(self): steps = [] dest_dir = os.path.join(build_dir, self.name) log_name = "%s/log-%s.log" % (build_dir, self.name) steps.append(PrintStep('Building %s in %s log %s' % (self.name, dest_dir, log_name))) if not os.path.isdir(dest_dir): steps.append(MkdirStep(dest_dir)) defconfig = self.defconfig dotconfig = '%s/.config' % dest_dir savedefconfig = '%s/defconfig' % dest_dir staging_dir = 'install_staging' modi_dir = '%s' % staging_dir hdri_dir = '%s/usr' % staging_dir steps.append(RmtreeStep(os.path.join(dest_dir, staging_dir))) steps.append(ExecStep(['make', 'O=%s' % dest_dir, self.confname], env=self.make_env)) if not all_options.updateconfigs: # Build targets can be dependent upon the completion of # previous build targets, so build them one at a time. cmd_line = ['make', 'INSTALL_HDR_PATH=%s' % hdri_dir, 'INSTALL_MOD_PATH=%s' % modi_dir, 'O=%s' % dest_dir] build_targets = [] for c in make_command: if re.match(r'^-{1,2}\w', c): cmd_line.append(c) else: build_targets.append(c) for t in build_targets: steps.append(ExecStep(cmd_line + [t], env=self.make_env)) # Copy the defconfig back. if all_options.configs or all_options.updateconfigs: steps.append(ExecStep(['make', 'O=%s' % dest_dir, 'savedefconfig'], env=self.make_env)) steps.append(CopyfileStep(savedefconfig, defconfig)) return steps def update_config(file, str): print 'Updating %s with \'%s\'\n' % (file, str) with open(file, 'a') as defconfig: defconfig.write(str + '\n') def scan_configs(): """Get the full list of defconfigs appropriate for this tree.""" names = [] arch_pats = ( r'[fm]sm[0-9]*_defconfig', r'apq*_defconfig', r'qsd*_defconfig', r'mdm*_defconfig', r'mpq*_defconfig', ) arch64_pats = ( r'msm*_defconfig', ) for p in arch_pats: for n in glob.glob('arch/arm/configs/' + p): name = os.path.basename(n)[:-10] names.append(Builder(name, n)) if 'CROSS_COMPILE64' in os.environ: for p in arch64_pats: for n in glob.glob('arch/arm64/configs/' + p): name = os.path.basename(n)[:-10] + "-64" names.append(Builder(name, n)) return names def build_many(targets): print "Building %d target(s)" % len(targets) # To try and make up for the link phase being serial, try to do # two full builds in parallel. Don't do too many because lots of # parallel builds tends to use up available memory rather quickly. parallel = 2 if all_options.jobs and all_options.jobs > 1: j = max(all_options.jobs / parallel, 2) make_command.append("-j" + str(j)) tracker = BuildTracker(parallel) for target in targets: if all_options.updateconfigs: update_config(target.defconfig, all_options.updateconfigs) steps = target.build() tracker.add_sequence(target.log_name, target.name, steps) tracker.run() def main(): global make_command check_kernel() check_build() configs = scan_configs() usage = (""" %prog [options] all -- Build all targets %prog [options] target target ... 
-- List specific targets %prog [options] perf -- Build all perf targets %prog [options] noperf -- Build all non-perf targets""") parser = OptionParser(usage=usage, version=version) parser.add_option('--configs', action='store_true', dest='configs', help="Copy configs back into tree") parser.add_option('--list', action='store_true', dest='list', help='List available targets') parser.add_option('-v', '--verbose', action='store_true', dest='verbose', help='Output to stdout in addition to log file') parser.add_option('--oldconfig', action='store_true', dest='oldconfig', help='Only process "make oldconfig"') parser.add_option('--updateconfigs', dest='updateconfigs', help="Update defconfigs with provided option setting, " "e.g. --updateconfigs=\'CONFIG_USE_THING=y\'") parser.add_option('-j', '--jobs', type='int', dest="jobs", help="Number of simultaneous jobs") parser.add_option('-l', '--load-average', type='int', dest='load_average', help="Don't start multiple jobs unless load is below LOAD_AVERAGE") parser.add_option('-k', '--keep-going', action='store_true', dest='keep_going', default=False, help="Keep building other targets if a target fails") parser.add_option('-m', '--make-target', action='append', help='Build the indicated make target (default: %s)' % ' '.join(make_command)) (options, args) = parser.parse_args() global all_options all_options = options if options.list: print "Available targets:" for target in configs: print " %s" % target.name sys.exit(0) if options.oldconfig: make_command = ["oldconfig"] elif options.make_target: make_command = options.make_target if args == ['all']: build_many(configs) elif args == ['perf']: targets = [] for t in configs: if "perf" in t.name: targets.append(t) build_many(targets) elif args == ['noperf']: targets = [] for t in configs: if "perf" not in t.name: targets.append(t) build_many(targets) elif len(args) > 0: all_configs = {} for t in configs: all_configs[t.name] = t targets = [] for t in args: if t not in all_configs: parser.error("Target '%s' not one of %s" % (t, all_configs.keys())) targets.append(all_configs[t]) build_many(targets) else: parser.error("Must specify a target to build, or 'all'") if __name__ == "__main__": main()
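A small sketch (not in the script) of how the step objects compose; any callable that accepts a line of text can serve as the printer:

import sys

steps = [PrintStep('starting'), ExecStep(['echo', 'hello'])]
for step in steps:
    status = step.run(lambda line: sys.stdout.write(line + '\n'))
    if status:  # ExecStep returns ('error', exit_code) on failure
        break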
gpl-2.0
ritviksahajpal/Py6S
Py6S/SixSHelpers/all_angles.py
1
13499
# This file is part of Py6S. # # Copyright 2012 Robin Wilson and contributors listed in the CONTRIBUTORS file. # # Py6S is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Py6S is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with Py6S. If not, see <http://www.gnu.org/licenses/>. import numpy as np from matplotlib.pyplot import * import itertools from multiprocessing.dummy import Pool import copy class Angles: @classmethod def run360(cls, s, solar_or_view, na=36, nz=10, output_name=None, n=None): """Runs Py6S for lots of angles to produce a polar contour plot. The calls to 6S for each angle will be run in parallel, making this function far faster than simply running a for loop over all of the angles. Arguments: * ``s`` -- A :class:`.SixS` instance configured with all of the parameters you want to run the simulation with * ``solar_or_view`` -- Set to ``'solar'`` if you want to iterate over the solar zenith/azimuth angles or ``'view'`` if you want to iterate over the view zenith/azimuth angles * ``output_name`` -- (Optional) The name of the output from the 6S simulation to plot. This should be a string containing exactly what you would put after ``s.outputs`` to print the output. For example `pixel_reflectance`. * ``na`` -- (Optional) The number of azimuth angles to iterate over to generate the data for the plot (defaults to 36, giving data every 10 degrees) * ``nz`` -- (Optional) The number of zenith angles to iterate over to generate the data for the plot (defaults to 10, giving data every 10 degrees) * ``n`` -- (Optional) The number of threads to run in parallel. This defaults to the number of CPU cores in your system, and is unlikely to need changing. For example:: s = SixS() s.ground_reflectance = GroundReflectance.HomogeneousWalthall(0.48, 0.50, 2.95, 0.6) s.geometry.solar_z = 30 s.geometry.solar_a = 0 data = SixSHelpers.Angles.run360(s, 'view', output_name='pixel_reflectance') """ results = [] azimuths = np.linspace(0, 360, na) zeniths = np.linspace(0, 89, nz) def f(args): azimuth, zenith = args s.outputs = None a = copy.deepcopy(s) if solar_or_view == 'view': a.geometry.view_a = azimuth a.geometry.view_z = zenith elif solar_or_view == 'solar': a.geometry.solar_a = azimuth a.geometry.solar_z = zenith else: raise ParameterException("all_angles", "You must choose to vary either the solar or view angle.") a.run() if output_name is None: return a.outputs else: return getattr(a.outputs, output_name) # Run the map if n is None: pool = Pool() else: pool = Pool(n) print "Running for many angles - this may take a long time" results = pool.map(f, itertools.product(azimuths, zeniths)) results = np.array(results) return (results, azimuths, zeniths, s.geometry.solar_a, s.geometry.solar_z) @classmethod def plot360(cls, data, output_name=None, show_sun=True, colorbarlabel=None): """Plot the data returned from :meth:`run360` as a polar contour plot, selecting an output if required. Arguments: * ``data`` -- The return value from :meth:`run360` * ``output_name`` -- (Optional) The output name to extract (eg. 
"pixel_reflectance") if the given data is provided as instances of the Outputs class * ``show_sun`` -- (Optional) Whether to show the location of the sun on the resulting polar plot. * ``colorbarlabel`` -- (Optional) The label to use on the color bar shown with the plot """ results, azimuths, zeniths, sa, sz = data if not isinstance(results[0], float): # The results are not floats, so a float must be extracted from the output if output_name is None: raise ParameterException("output_name", "You must specify an output name when plotting data which is given as Outputs instances") results = cls.extract_output(results, output_name) fig, ax, cax = cls.plot_polar_contour(results, azimuths, zeniths, colorbarlabel=colorbarlabel) if show_sun: ax.autoscale(False) ax.plot(np.radians(sa), sz, '*', markersize=20, markerfacecolor='yellow', markeredgecolor='red') show() return fig, ax @classmethod def run_and_plot_360(cls, s, solar_or_view, output_name, show_sun=True, na=36, nz=10, colorbarlabel=None): """Runs Py6S for lots of angles to produce a polar contour plot. Arguments: * ``s`` -- A :class:`.SixS` instance configured with all of the parameters you want to run the simulation with * ``solar_or_view`` -- Set to ``'solar'`` if you want to iterate over the solar zenith/azimuth angles or ``'view'`` if you want to iterate over the view zenith/azimuth angles * ``output_name`` -- The name of the output from SixS to plot. This should be a string containing exactly what you would put after ``s.outputs`` to print the output. For example `pixel_reflectance`. * ``show_sun`` -- (Optional) Whether to place a marker showing the location of the sun on the contour plot (defaults to True, has no effect when ``solar_or_view`` set to ``'solar'``.) * ``na`` -- (Optional) The number of azimuth angles to iterate over to generate the data for the plot (defaults to 36, giving data every 10 degrees) * ``nz`` -- (Optional) The number of zenith angles to iterate over to generate the data for the plot (defaults to 10, giving data every 10 degrees) * ``colorbarlabel`` -- (Optional) The label to use on the color bar shown with the plot For example:: s = SixS() s.ground_reflectance = GroundReflectance.HomogeneousWalthall(0.48, 0.50, 2.95, 0.6) s.geometry.solar_z = 30 s.geometry.solar_a = 0 SixSHelpers.Angles.run_and_plot_360(s, 'view', 'pixel_reflectance') """ if solar_or_view == 'solar': show_sun = False res = cls.run360(s, solar_or_view, na, nz) plot_res = cls.plot360(res, output_name, show_sun, colorbarlabel=colorbarlabel) return plot_res @classmethod def extract_output(cls, results, output_name): """Extracts data for one particular SixS output from a list of SixS.Outputs instances. Basically just a wrapper around a list comprehension. Arguments: * ``results`` -- A list of :class:`.SixS.Outputs` instances * ``output_name`` -- The name of the output to extract. This should be a string containing whatever is put after the `s.outputs` when printing the output, for example `'pixel_reflectance'`. """ results_output = [getattr(r, output_name) for r in results] return results_output @classmethod def plot_polar_contour(cls, values, azimuths, zeniths, filled=True, colorbarlabel=""): """Plot a polar contour plot, with 0 degrees at the North. Arguments: * ``values`` -- A list (or other iterable - eg. 
a NumPy array) of the values to plot on the contour plot (the `z` values) * ``azimuths`` -- A list of azimuths (in degrees) * ``zeniths`` -- A list of zeniths (that is, radii) * ``filled`` -- (Optional) Whether to plot a filled contour plot, or just the contours (defaults to filled) * ``colorbarlabel`` -- (Optional) The label to use on the color bar shown with the plot The shapes of these lists are important, and are designed for a particular use case (but should be more generally useful). The values list should be `len(azimuths) * len(zeniths)` long with data for the first azimuth for all the zeniths, then the second azimuth for all the zeniths etc. This is designed to work nicely with data that is produced using a loop as follows:: values = [] for azimuth in azimuths: for zenith in zeniths: # Do something and get a result values.append(result) After that code the azimuths, zeniths and values lists will be ready to be passed into this function. """ theta = np.radians(azimuths) zeniths = np.array(zeniths) values = np.array(values) values = values.reshape(len(azimuths), len(zeniths)) r, theta = np.meshgrid(zeniths, np.radians(azimuths)) fig, ax = subplots(subplot_kw=dict(projection='polar')) ax.set_theta_zero_location("N") ax.set_theta_direction(-1) if filled: cax = ax.contourf(theta, r, values, 30) else: cax = ax.contour(theta, r, values, 30) cb = fig.colorbar(cax) cb.set_label(colorbarlabel) return fig, ax, cax @classmethod def run_principal_plane(cls, s, output_name=None, n=None): """Runs the given 6S simulation to get the outputs for the solar principal plane. This function runs the simulation for all zenith angles in the azimuthal line of the sun. For example, if the solar azimuth is 90 degrees, this function will run simulations for:: Azimuth Zenith 90 85 90 80 90 75 90 70 90 65 90 60 90 55 ... .. 90 0 270 5 270 10 270 15 ... .. 270 80 270 85 The calls to 6S for each angle will be run in parallel, making this function far faster than simply running a for loop over each angle. Arguments: * ``s`` -- A :class:`.SixS` instance configured with all of the parameters you want to run the simulation with * ``output_name`` -- (Optional) The output name to extract (eg. "pixel_reflectance") if the given data is provided as instances of the Outputs class * ``n`` -- (Optional) The number of threads to run in parallel. This defaults to the number of CPU cores in your system, and is unlikely to need changing. Return values: A tuple containing zenith angles and the corresponding values or Outputs instances (depending on the arguments given). The zenith angles returned have been modified so that the zenith angles on the 'sun-side' are positive, and those on the other side (ie. past the vertical) are negative, for ease of plotting. 
""" # Get the solar azimuth and zenith angles from the SixS instance sa = s.geometry.solar_a # Compute the angles in the principal plane # Get the solar azimuth on the opposite side for the other half of the principal plane opp_sa = (sa + 180) % 360 # Calculate the first side (the solar zenith angle side) first_side_z = np.arange(85, -5, -5) first_side_a = np.repeat(sa, len(first_side_z)) # Calculate the other side temp = first_side_z[:-1] second_side_z = temp[::-1] # Reverse array second_side_a = np.repeat(opp_sa, len(second_side_z)) # Join the two sides together all_zeniths = np.hstack((first_side_z, second_side_z)) all_zeniths_for_return = np.hstack((first_side_z, -1 * second_side_z)) all_azimuths = np.hstack((first_side_a, second_side_a)) def f(arg): zenith, azimuth = arg s.outputs = None a = copy.deepcopy(s) a.geometry.view_z = zenith a.geometry.view_a = azimuth a.run() if output_name is None: return a.outputs else: return getattr(a.outputs, output_name) # Run the map if n is None: pool = Pool() else: pool = Pool(n) print "Running for many angles - this may take a long time" results = pool.map(f, zip(all_zeniths, all_azimuths)) results = np.array(results) results = np.array(results) return all_zeniths_for_return, results def plot_principal_plane(zeniths, values, y_axis_label): """Plot the results from a principal plane simulation (eg. a run of :meth:`.run_principal_plane`). Arguments: * ``zeniths`` -- A list of view zenith angles in degrees * ``values`` -- A list of simulated values for each of these angles * ``y_axis_label`` -- A string to use as the label for the y axis """ plot(zeniths, values) xlabel("View zenith angle (degrees)") ylabel(y_axis_label) show()
lgpl-3.0
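A minimal sketch (not part of the Py6S record above, runnable without Py6S) of the thread-pool pattern that run360 and run_principal_plane rely on; simulate is a hypothetical stand-in for copying the SixS object and calling run(), and the final reshape mirrors the result ordering plot_polar_contour expects.

import itertools
import numpy as np
from multiprocessing.dummy import Pool  # thread pool, same import as in run360

azimuths = np.linspace(0, 360, 36)
zeniths = np.linspace(0, 89, 10)

def simulate(args):
    # Stand-in for a real 6S run over one (azimuth, zenith) pair.
    azimuth, zenith = args
    return np.cos(np.radians(zenith)) * (1 + 0.1 * np.sin(np.radians(azimuth)))

pool = Pool()  # defaults to one worker per CPU core, as the docstrings describe
results = np.array(pool.map(simulate, itertools.product(azimuths, zeniths)))

# itertools.product yields every zenith for the first azimuth, then every
# zenith for the next azimuth, so the flat results reshape cleanly to
# (len(azimuths), len(zeniths)) -- the layout plot_polar_contour documents.
grid = results.reshape(len(azimuths), len(zeniths))
print(grid.shape)  # (36, 10)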
Flamacue/pretix
src/tests/plugins/banktransfer/test_actions.py
2
9548
import json from datetime import timedelta import pytest from django.utils.timezone import now from pretix.base.models import ( Event, EventPermission, Item, Order, OrderPosition, Organizer, Quota, User, ) from pretix.plugins.banktransfer.models import BankImportJob, BankTransaction @pytest.fixture def env(): o = Organizer.objects.create(name='Dummy', slug='dummy') event = Event.objects.create( organizer=o, name='Dummy', slug='dummy', date_from=now(), plugins='pretix.plugins.banktransfer' ) user = User.objects.create_user('[email protected]', 'dummy') EventPermission.objects.create(user=user, event=event) o1 = Order.objects.create( code='1Z3AS', event=event, status=Order.STATUS_PENDING, datetime=now(), expires=now() + timedelta(days=10), total=23, payment_provider='banktransfer' ) o2 = Order.objects.create( code='6789Z', event=event, status=Order.STATUS_CANCELED, datetime=now(), expires=now() + timedelta(days=10), total=23, payment_provider='banktransfer' ) quota = Quota.objects.create(name="Test", size=2, event=event) item1 = Item.objects.create(event=event, name="Ticket", default_price=23) quota.items.add(item1) OrderPosition.objects.create(order=o1, item=item1, variation=None, price=23) return event, user, o1, o2 @pytest.mark.django_db def test_discard(env, client): job = BankImportJob.objects.create(event=env[0]) trans = BankTransaction.objects.create(event=env[0], import_job=job, payer='Foo', state=BankTransaction.STATE_ERROR, amount=0, date='unknown') client.login(email='[email protected]', password='dummy') r = json.loads(client.post('/control/event/{}/{}/banktransfer/action/'.format(env[0].organizer.slug, env[0].slug), { 'action_{}'.format(trans.pk): 'discard', }).content.decode('utf-8')) assert r['status'] == 'ok' trans.refresh_from_db() assert trans.state == BankTransaction.STATE_DISCARDED assert trans.payer == '' @pytest.mark.django_db def test_accept_wrong_amount(env, client): job = BankImportJob.objects.create(event=env[0]) trans = BankTransaction.objects.create(event=env[0], import_job=job, payer='Foo', state=BankTransaction.STATE_INVALID, amount=12, date='unknown', order=env[2]) client.login(email='[email protected]', password='dummy') r = json.loads(client.post('/control/event/{}/{}/banktransfer/action/'.format(env[0].organizer.slug, env[0].slug), { 'action_{}'.format(trans.pk): 'accept', }).content.decode('utf-8')) assert r['status'] == 'ok' trans.refresh_from_db() assert trans.state == BankTransaction.STATE_VALID env[2].refresh_from_db() assert env[2].status == Order.STATUS_PAID @pytest.mark.django_db def test_assign_order(env, client): job = BankImportJob.objects.create(event=env[0]) trans = BankTransaction.objects.create(event=env[0], import_job=job, payer='Foo', state=BankTransaction.STATE_NOMATCH, amount=23, date='unknown') client.login(email='[email protected]', password='dummy') r = json.loads(client.post('/control/event/{}/{}/banktransfer/action/'.format(env[0].organizer.slug, env[0].slug), { 'action_{}'.format(trans.pk): 'assign:{}'.format(env[2].code), }).content.decode('utf-8')) assert r['status'] == 'ok' trans.refresh_from_db() assert trans.state == BankTransaction.STATE_VALID env[2].refresh_from_db() assert env[2].status == Order.STATUS_PAID @pytest.mark.django_db def test_assign_order_unknown(env, client): job = BankImportJob.objects.create(event=env[0]) trans = BankTransaction.objects.create(event=env[0], import_job=job, payer='Foo', state=BankTransaction.STATE_NOMATCH, amount=23, date='unknown') client.login(email='[email protected]', password='dummy') r = 
json.loads(client.post('/control/event/{}/{}/banktransfer/action/'.format(env[0].organizer.slug, env[0].slug), { 'action_{}'.format(trans.pk): 'assign:FOO' }).content.decode('utf-8')) assert r['status'] == 'error' trans.refresh_from_db() assert trans.state == BankTransaction.STATE_NOMATCH @pytest.mark.django_db def test_assign_order_amount_incorrect(env, client): job = BankImportJob.objects.create(event=env[0]) trans = BankTransaction.objects.create(event=env[0], import_job=job, payer='Foo', state=BankTransaction.STATE_NOMATCH, amount=12, date='unknown') client.login(email='[email protected]', password='dummy') r = json.loads(client.post('/control/event/{}/{}/banktransfer/action/'.format(env[0].organizer.slug, env[0].slug), { 'action_{}'.format(trans.pk): 'assign:{}'.format(env[2].code) }).content.decode('utf-8')) assert r['status'] == 'error' trans.refresh_from_db() assert trans.state == BankTransaction.STATE_NOMATCH @pytest.mark.django_db def test_comment(env, client): job = BankImportJob.objects.create(event=env[0]) trans = BankTransaction.objects.create(event=env[0], import_job=job, payer='Foo', state=BankTransaction.STATE_NOMATCH, amount=12, date='unknown') client.login(email='[email protected]', password='dummy') r = json.loads(client.post('/control/event/{}/{}/banktransfer/action/'.format(env[0].organizer.slug, env[0].slug), { 'action_{}'.format(trans.pk): 'comment:This is my comment'.format(env[2].code) }).content.decode('utf-8')) assert r['status'] == 'ok' trans.refresh_from_db() assert trans.comment == 'This is my comment' assert trans.state == BankTransaction.STATE_NOMATCH @pytest.mark.django_db def test_retry_success(env, client): job = BankImportJob.objects.create(event=env[0]) trans = BankTransaction.objects.create(event=env[0], import_job=job, payer='Foo', state=BankTransaction.STATE_DUPLICATE, amount=23, date='unknown', order=env[3]) client.login(email='[email protected]', password='dummy') env[3].status = Order.STATUS_PENDING env[3].save() r = json.loads(client.post('/control/event/{}/{}/banktransfer/action/'.format(env[0].organizer.slug, env[0].slug), { 'action_{}'.format(trans.pk): 'retry', }).content.decode('utf-8')) assert r['status'] == 'ok' trans.refresh_from_db() assert trans.state == BankTransaction.STATE_VALID env[3].refresh_from_db() assert env[3].status == Order.STATUS_PAID @pytest.mark.django_db def test_retry_canceled(env, client): job = BankImportJob.objects.create(event=env[0]) trans = BankTransaction.objects.create(event=env[0], import_job=job, payer='Foo', state=BankTransaction.STATE_ERROR, amount=23, date='unknown', order=env[3]) client.login(email='[email protected]', password='dummy') r = json.loads(client.post('/control/event/{}/{}/banktransfer/action/'.format(env[0].organizer.slug, env[0].slug), { 'action_{}'.format(trans.pk): 'retry', }).content.decode('utf-8')) assert r['status'] == 'error' trans.refresh_from_db() assert trans.state == BankTransaction.STATE_ERROR env[3].refresh_from_db() assert env[3].status == Order.STATUS_CANCELED @pytest.mark.django_db def test_retry_refunded(env, client): job = BankImportJob.objects.create(event=env[0]) trans = BankTransaction.objects.create(event=env[0], import_job=job, payer='Foo', state=BankTransaction.STATE_ERROR, amount=23, date='unknown', order=env[3]) client.login(email='[email protected]', password='dummy') env[3].status = Order.STATUS_REFUNDED env[3].save() r = json.loads(client.post('/control/event/{}/{}/banktransfer/action/'.format(env[0].organizer.slug, env[0].slug), { 'action_{}'.format(trans.pk): 
'retry', }).content.decode('utf-8')) assert r['status'] == 'error' trans.refresh_from_db() assert trans.state == BankTransaction.STATE_ERROR env[3].refresh_from_db() assert env[3].status == Order.STATUS_REFUNDED @pytest.mark.django_db def test_retry_paid(env, client): job = BankImportJob.objects.create(event=env[0]) trans = BankTransaction.objects.create(event=env[0], import_job=job, payer='Foo', state=BankTransaction.STATE_ERROR, amount=23, date='unknown', order=env[3]) client.login(email='[email protected]', password='dummy') env[3].status = Order.STATUS_PAID env[3].save() r = json.loads(client.post('/control/event/{}/{}/banktransfer/action/'.format(env[0].organizer.slug, env[0].slug), { 'action_{}'.format(trans.pk): 'retry', }).content.decode('utf-8')) assert r['status'] == 'error' trans.refresh_from_db() assert trans.state == BankTransaction.STATE_ERROR env[3].refresh_from_db() assert env[3].status == Order.STATUS_PAID
apache-2.0
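Each test above repeats the same request/decode step inline. A hypothetical helper like the sketch below (post_action is not part of pretix; the URL layout and the 'action_<pk>' field name are taken from the tests themselves) captures that pattern.

import json

def post_action(client, event, trans, action):
    # POST a single 'action_<pk>' field to the banktransfer action view
    # and decode the JSON body, exactly as each test does inline.
    url = '/control/event/{}/{}/banktransfer/action/'.format(
        event.organizer.slug, event.slug)
    response = client.post(url, {'action_{}'.format(trans.pk): action})
    return json.loads(response.content.decode('utf-8'))

# Usage inside test_discard, for example:
#   r = post_action(client, env[0], trans, 'discard')
#   assert r['status'] == 'ok'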
undefinedv/Jingubang
sqlmap/tamper/equaltolike.py
2
1136
#!/usr/bin/env python """ Copyright (c) 2006-2016 sqlmap developers (http://sqlmap.org/) See the file 'doc/COPYING' for copying permission """ import os import re from lib.core.common import singleTimeWarnMessage from lib.core.enums import DBMS from lib.core.enums import PRIORITY __priority__ = PRIORITY.HIGHEST def dependencies(): singleTimeWarnMessage("tamper script '%s' is unlikely to work against %s" % (os.path.basename(__file__).split(".")[0], DBMS.PGSQL)) def tamper(payload, **kwargs): """ Replaces all occurrences of operator equal ('=') with operator 'LIKE' Tested against: * Microsoft SQL Server 2005 * MySQL 4, 5.0 and 5.5 Notes: * Useful to bypass weak and bespoke web application firewalls that filter the equal character ('=') * The LIKE operator is SQL standard. Hence, this tamper script should work against all (?) databases >>> tamper('SELECT * FROM users WHERE id=1') 'SELECT * FROM users WHERE id LIKE 1' """ retVal = payload if payload: retVal = re.sub(r"\s*=\s*", " LIKE ", retVal) return retVal
gpl-3.0
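The substitution this tamper script performs can be exercised with plain re.sub, no sqlmap imports required; the example payload here is illustrative, not from the sqlmap test suite.

import re

payload = "SELECT * FROM users WHERE id=1 AND name = 'luther'"
# Same pattern the tamper() function applies: swallow optional whitespace
# around '=' and replace the whole match with ' LIKE '.
print(re.sub(r"\s*=\s*", " LIKE ", payload))
# SELECT * FROM users WHERE id LIKE 1 AND name LIKE 'luther'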
tahnok/react-native
JSCLegacyProfiler/trace_data.py
375
8013
from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import re import unittest """ # _-----=> irqs-off # / _----=> need-resched # | / _---=> hardirq/softirq # || / _--=> preempt-depth # ||| / delay # TASK-PID CPU# |||| TIMESTAMP FUNCTION # | | | |||| | | <idle>-0 [001] ...2 3269.291072: sched_switch: prev_comm=swapper/1 prev_pid=0 prev_prio=120 prev_state=R ==> next_comm=mmcqd/0 next_pid=120 next_prio=120 """ TRACE_LINE_PATTERN = re.compile( r'^\s*(?P<task>.+)-(?P<pid>\d+)\s+(?:\((?P<tgid>.+)\)\s+)?\[(?P<cpu>\d+)\]\s+(?:(?P<flags>\S{4})\s+)?(?P<timestamp>[0-9.]+):\s+(?P<function>.+)$') """ Example lines from custom app traces: 0: B|27295|providerRemove 0: E tracing_mark_write: S|27311|NNFColdStart<D-7744962>|1112249168 """ APP_TRACE_LINE_PATTERN = re.compile( r'^(?P<type>.+?): (?P<args>.+)$') """ Example section names: NNFColdStart NNFColdStart<0><T7744962> NNFColdStart<X> NNFColdStart<T7744962> """ DECORATED_SECTION_NAME_PATTERN = re.compile(r'^(?P<section_name>.*?)(?:<0>)?(?:<(?P<command>.)(?P<argument>.*?)>)?$') SYSTRACE_LINE_TYPES = set(['0', 'tracing_mark_write']) class TraceLine(object): def __init__(self, task, pid, tgid, cpu, flags, timestamp, function): self.task = task self.pid = pid self.tgid = tgid self.cpu = cpu self.flags = flags self.timestamp = timestamp self.function = function self.canceled = False @property def is_app_trace_line(self): return isinstance(self.function, AppTraceFunction) def cancel(self): self.canceled = True def __str__(self): if self.canceled: return "" elif self.tgid: return "{task:>16s}-{pid:<5d} ({tgid:5s}) [{cpu:03d}] {flags:4s} {timestamp:12f}: {function}\n".format(**vars(self)) elif self.flags: return "{task:>16s}-{pid:<5d} [{cpu:03d}] {flags:4s} {timestamp:12f}: {function}\n".format(**vars(self)) else: return "{task:>16s}-{pid:<5d} [{cpu:03d}] {timestamp:12.6f}: {function}\n".format(**vars(self)) class AppTraceFunction(object): def __init__(self, type, args): self.type = type self.args = args self.operation = args[0] if len(args) >= 2 and args[1]: self.pid = int(args[1]) if len(args) >= 3: self._section_name, self.command, self.argument = _parse_section_name(args[2]) args[2] = self._section_name else: self._section_name = None self.command = None self.argument = None self.cookie = None @property def section_name(self): return self._section_name @section_name.setter def section_name(self, value): self._section_name = value self.args[2] = value def __str__(self): return "{type}: {args}".format(type=self.type, args='|'.join(self.args)) class AsyncTraceFunction(AppTraceFunction): def __init__(self, type, args): super(AsyncTraceFunction, self).__init__(type, args) self.cookie = int(args[3]) TRACE_TYPE_MAP = { 'S': AsyncTraceFunction, 'T': AsyncTraceFunction, 'F': AsyncTraceFunction, } def parse_line(line): match = TRACE_LINE_PATTERN.match(line.strip()) if not match: return None task = match.group("task") pid = int(match.group("pid")) tgid = match.group("tgid") cpu = int(match.group("cpu")) flags = match.group("flags") timestamp = float(match.group("timestamp")) function = match.group("function") app_trace = _parse_function(function) if app_trace: function = app_trace return TraceLine(task, pid, tgid, cpu, flags, timestamp, function) def parse_dextr_line(line): task = line["name"] pid = line["pid"] tgid = line["tid"] cpu = None flags = None timestamp = line["ts"] function = AppTraceFunction("DextrTrace", [line["ph"], line["pid"], line["name"]]) return 
TraceLine(task, pid, tgid, cpu, flags, timestamp, function) def _parse_function(function): line_match = APP_TRACE_LINE_PATTERN.match(function) if not line_match: return None type = line_match.group("type") if not type in SYSTRACE_LINE_TYPES: return None args = line_match.group("args").split('|') if len(args) == 1 and len(args[0]) == 0: args = None constructor = TRACE_TYPE_MAP.get(args[0], AppTraceFunction) return constructor(type, args) def _parse_section_name(section_name): if section_name is None: return section_name, None, None section_name_match = DECORATED_SECTION_NAME_PATTERN.match(section_name) section_name = section_name_match.group("section_name") command = section_name_match.group("command") argument = section_name_match.group("argument") return section_name, command, argument def _format_section_name(section_name, command, argument): if not command: return section_name return "{section_name}<{command}{argument}>".format(**vars()) class RoundTripFormattingTests(unittest.TestCase): def testPlainSectionName(self): section_name = "SectionName12345-5562342fas" self.assertEqual(section_name, _format_section_name(*_parse_section_name(section_name))) def testDecoratedSectionName(self): section_name = "SectionName12345-5562342fas<D-123456>" self.assertEqual(section_name, _format_section_name(*_parse_section_name(section_name))) def testSimpleFunction(self): function = "0: E" self.assertEqual(function, str(_parse_function(function))) def testFunctionWithoutCookie(self): function = "0: B|27295|providerRemove" self.assertEqual(function, str(_parse_function(function))) def testFunctionWithCookie(self): function = "0: S|27311|NNFColdStart|1112249168" self.assertEqual(function, str(_parse_function(function))) def testFunctionWithCookieAndArgs(self): function = "0: T|27311|NNFColdStart|1122|Start" self.assertEqual(function, str(_parse_function(function))) def testFunctionWithArgsButNoPid(self): function = "0: E|||foo=bar" self.assertEqual(function, str(_parse_function(function))) def testKitKatFunction(self): function = "tracing_mark_write: B|14127|Looper.dispatchMessage|arg=>>>>> Dispatching to Handler (android.os.Handler) {422ae980} null: 0|Java" self.assertEqual(function, str(_parse_function(function))) def testNonSysTraceFunctionIgnored(self): function = "sched_switch: prev_comm=swapper/1 prev_pid=0 prev_prio=120 prev_state=R ==> next_comm=mmcqd/0 next_pid=120 next_prio=120" self.assertEqual(None, _parse_function(function)) def testLineWithFlagsAndTGID(self): line = " <idle>-0 ( 550) [000] d..2 7953.258473: cpu_idle: state=1 cpu_id=0\n" self.assertEqual(line, str(parse_line(line))) def testLineWithFlagsAndNoTGID(self): line = " <idle>-0 (-----) [000] d..2 7953.258473: cpu_idle: state=1 cpu_id=0\n" self.assertEqual(line, str(parse_line(line))) def testLineWithFlags(self): line = " <idle>-0 [001] ...2 3269.291072: sched_switch: prev_comm=swapper/1 prev_pid=0 prev_prio=120 prev_state=R ==> next_comm=mmcqd/0 next_pid=120 next_prio=120\n" self.assertEqual(line, str(parse_line(line))) def testLineWithoutFlags(self): line = " <idle>-0 [001] 3269.291072: sched_switch: prev_comm=swapper/1 prev_pid=0 prev_prio=120 prev_state=R ==> next_comm=mmcqd/0 next_pid=120 next_prio=120\n" self.assertEqual(line, str(parse_line(line)))
bsd-3-clause
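Assuming the file above is importable as trace_data (the module name is inferred from its path, not stated in the source), parse_line round-trips the sample systrace line used in its own unit tests.

from trace_data import parse_line  # module name assumed from the path above

line = ("          <idle>-0     [001] ...2  3269.291072: sched_switch: "
        "prev_comm=swapper/1 prev_pid=0 prev_prio=120 prev_state=R ==> "
        "next_comm=mmcqd/0 next_pid=120 next_prio=120\n")
parsed = parse_line(line)
# TraceLine exposes the named groups of TRACE_LINE_PATTERN as attributes.
print("%s pid=%d cpu=%d t=%.6f" % (parsed.task, parsed.pid, parsed.cpu, parsed.timestamp))
# <idle> pid=0 cpu=1 t=3269.291072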
nop33/indico-plugins
chat/indico_chat/controllers/event.py
1
1645
# This file is part of Indico. # Copyright (C) 2002 - 2017 European Organization for Nuclear Research (CERN). # # Indico is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License as # published by the Free Software Foundation; either version 3 of the # License, or (at your option) any later version. # # Indico is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Indico; if not, see <http://www.gnu.org/licenses/>. from __future__ import unicode_literals from flask_pluginengine import current_plugin from indico.legacy.webinterface.rh.conferenceDisplay import RHConferenceBaseDisplay from indico_chat.models.chatrooms import ChatroomEventAssociation from indico_chat.views import WPChatEventPage class RHChatEventPage(RHConferenceBaseDisplay): """Lists the public chatrooms in a conference""" def _process(self): chatrooms = ChatroomEventAssociation.find_for_event(self.event_new).all() cols = set() if any(c.chatroom.description for c in chatrooms): cols.add('description') if any(c.chatroom.password for c in chatrooms): cols.add('password') return WPChatEventPage.render_template('event_page.html', self._conf, event_chatrooms=chatrooms, cols=cols, chat_links=current_plugin.settings.get('chat_links'))
gpl-3.0
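The cols logic in _process above is a small idiom worth isolating: a column is included only if at least one chatroom has data for it. A sketch with dummy rows (no Indico objects required):

rows = [{'description': 'Main room', 'password': ''},
        {'description': '', 'password': ''}]
cols = set()
if any(r['description'] for r in rows):
    cols.add('description')
if any(r['password'] for r in rows):
    cols.add('password')
print(cols)  # only 'description' makes it in; every password is empty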
sertac/django
tests/gis_tests/geo3d/tests.py
199
17484
from __future__ import unicode_literals import os import re from unittest import skipUnless from django.contrib.gis.db.models import Extent3D, Union from django.contrib.gis.db.models.functions import ( AsGeoJSON, AsKML, Length, Perimeter, Scale, Translate, ) from django.contrib.gis.gdal import HAS_GDAL from django.contrib.gis.geos import GEOSGeometry, LineString, Point, Polygon from django.test import TestCase, ignore_warnings, skipUnlessDBFeature from django.utils._os import upath from django.utils.deprecation import ( RemovedInDjango20Warning, RemovedInDjango110Warning, ) from .models import ( City3D, Interstate2D, Interstate3D, InterstateProj2D, InterstateProj3D, MultiPoint3D, Point2D, Point3D, Polygon2D, Polygon3D, ) if HAS_GDAL: from django.contrib.gis.utils import LayerMapping, LayerMapError data_path = os.path.realpath(os.path.join(os.path.dirname(upath(__file__)), '..', 'data')) city_file = os.path.join(data_path, 'cities', 'cities.shp') vrt_file = os.path.join(data_path, 'test_vrt', 'test_vrt.vrt') # The coordinates of each city, with Z values corresponding to their # altitude in meters. city_data = ( ('Houston', (-95.363151, 29.763374, 18)), ('Dallas', (-96.801611, 32.782057, 147)), ('Oklahoma City', (-97.521157, 34.464642, 380)), ('Wellington', (174.783117, -41.315268, 14)), ('Pueblo', (-104.609252, 38.255001, 1433)), ('Lawrence', (-95.235060, 38.971823, 251)), ('Chicago', (-87.650175, 41.850385, 181)), ('Victoria', (-123.305196, 48.462611, 15)), ) # Reference mapping of city name to its altitude (Z value). city_dict = {name: coords for name, coords in city_data} # 3D freeway data derived from the National Elevation Dataset: # http://seamless.usgs.gov/products/9arc.php interstate_data = ( ('I-45', 'LINESTRING(-95.3708481 29.7765870 11.339,-95.3694580 29.7787980 4.536,' '-95.3690305 29.7797359 9.762,-95.3691886 29.7812450 12.448,' '-95.3696447 29.7850144 10.457,-95.3702511 29.7868518 9.418,' '-95.3706724 29.7881286 14.858,-95.3711632 29.7896157 15.386,' '-95.3714525 29.7936267 13.168,-95.3717848 29.7955007 15.104,' '-95.3717719 29.7969804 16.516,-95.3717305 29.7982117 13.923,' '-95.3717254 29.8000778 14.385,-95.3719875 29.8013539 15.160,' '-95.3720575 29.8026785 15.544,-95.3721321 29.8040912 14.975,' '-95.3722074 29.8050998 15.688,-95.3722779 29.8060430 16.099,' '-95.3733818 29.8076750 15.197,-95.3741563 29.8103686 17.268,' '-95.3749458 29.8129927 19.857,-95.3763564 29.8144557 15.435)', (11.339, 4.536, 9.762, 12.448, 10.457, 9.418, 14.858, 15.386, 13.168, 15.104, 16.516, 13.923, 14.385, 15.16, 15.544, 14.975, 15.688, 16.099, 15.197, 17.268, 19.857, 15.435), ), ) # Bounding box polygon for inner-loop of Houston (in projected coordinate # system 32140), with elevation values from the National Elevation Dataset # (see above). bbox_data = ( 'POLYGON((941527.97 4225693.20,962596.48 4226349.75,963152.57 4209023.95,' '942051.75 4208366.38,941527.97 4225693.20))', (21.71, 13.21, 9.12, 16.40, 21.71) ) class Geo3DLoadingHelper(object): def _load_interstate_data(self): # Interstate (2D / 3D and Geographic/Projected variants) for name, line, exp_z in interstate_data: line_3d = GEOSGeometry(line, srid=4269) line_2d = LineString([l[:2] for l in line_3d.coords], srid=4269) # Creating a geographic and projected version of the # interstate in both 2D and 3D. 
Interstate3D.objects.create(name=name, line=line_3d) InterstateProj3D.objects.create(name=name, line=line_3d) Interstate2D.objects.create(name=name, line=line_2d) InterstateProj2D.objects.create(name=name, line=line_2d) def _load_city_data(self): for name, pnt_data in city_data: City3D.objects.create(name=name, point=Point(*pnt_data, srid=4326)) def _load_polygon_data(self): bbox_wkt, bbox_z = bbox_data bbox_2d = GEOSGeometry(bbox_wkt, srid=32140) bbox_3d = Polygon(tuple((x, y, z) for (x, y), z in zip(bbox_2d[0].coords, bbox_z)), srid=32140) Polygon2D.objects.create(name='2D BBox', poly=bbox_2d) Polygon3D.objects.create(name='3D BBox', poly=bbox_3d) @skipUnless(HAS_GDAL, "GDAL is required for Geo3DTest.") @skipUnlessDBFeature("gis_enabled", "supports_3d_storage") class Geo3DTest(Geo3DLoadingHelper, TestCase): """ Only a subset of the PostGIS routines are 3D-enabled, and this TestCase tries to test the features that can handle 3D and that are also available within GeoDjango. For more information, see the PostGIS docs on the routines that support 3D: http://postgis.net/docs/PostGIS_Special_Functions_Index.html#PostGIS_3D_Functions """ def test_3d_hasz(self): """ Make sure data is 3D and has expected Z values -- shouldn't change because of coordinate system. """ self._load_interstate_data() for name, line, exp_z in interstate_data: interstate = Interstate3D.objects.get(name=name) interstate_proj = InterstateProj3D.objects.get(name=name) for i in [interstate, interstate_proj]: self.assertTrue(i.line.hasz) self.assertEqual(exp_z, tuple(i.line.z)) self._load_city_data() for name, pnt_data in city_data: city = City3D.objects.get(name=name) z = pnt_data[2] self.assertTrue(city.point.hasz) self.assertEqual(z, city.point.z) def test_3d_polygons(self): """ Test the creation of polygon 3D models. """ self._load_polygon_data() p3d = Polygon3D.objects.get(name='3D BBox') self.assertTrue(p3d.poly.hasz) self.assertIsInstance(p3d.poly, Polygon) self.assertEqual(p3d.poly.srid, 32140) def test_3d_layermapping(self): """ Testing LayerMapping on 3D models. """ point_mapping = {'point': 'POINT'} mpoint_mapping = {'mpoint': 'MULTIPOINT'} # The VRT is 3D, but should still be able to map sans the Z. lm = LayerMapping(Point2D, vrt_file, point_mapping, transform=False) lm.save() self.assertEqual(3, Point2D.objects.count()) # The city shapefile is 2D, and won't be able to fill the coordinates # in the 3D model -- thus, a LayerMapError is raised. self.assertRaises(LayerMapError, LayerMapping, Point3D, city_file, point_mapping, transform=False) # 3D model should take 3D data just fine. lm = LayerMapping(Point3D, vrt_file, point_mapping, transform=False) lm.save() self.assertEqual(3, Point3D.objects.count()) # Making sure LayerMapping.make_multi works right, by converting # a Point25D into a MultiPoint25D. lm = LayerMapping(MultiPoint3D, vrt_file, mpoint_mapping, transform=False) lm.save() self.assertEqual(3, MultiPoint3D.objects.count()) @ignore_warnings(category=RemovedInDjango20Warning) def test_kml(self): """ Test GeoQuerySet.kml() with Z values. """ self._load_city_data() h = City3D.objects.kml(precision=6).get(name='Houston') # KML should be 3D. # `SELECT ST_AsKML(point, 6) FROM geo3d_city3d WHERE name = 'Houston';` ref_kml_regex = re.compile(r'^<Point><coordinates>-95.363\d+,29.763\d+,18</coordinates></Point>$') self.assertTrue(ref_kml_regex.match(h.kml)) @ignore_warnings(category=RemovedInDjango20Warning) def test_geojson(self): """ Test GeoQuerySet.geojson() with Z values. 
""" self._load_city_data() h = City3D.objects.geojson(precision=6).get(name='Houston') # GeoJSON should be 3D # `SELECT ST_AsGeoJSON(point, 6) FROM geo3d_city3d WHERE name='Houston';` ref_json_regex = re.compile(r'^{"type":"Point","coordinates":\[-95.363151,29.763374,18(\.0+)?\]}$') self.assertTrue(ref_json_regex.match(h.geojson)) @skipUnlessDBFeature("supports_3d_functions") def test_union(self): """ Testing the Union aggregate of 3D models. """ # PostGIS query that returned the reference EWKT for this test: # `SELECT ST_AsText(ST_Union(point)) FROM geo3d_city3d;` self._load_city_data() ref_ewkt = ( 'SRID=4326;MULTIPOINT(-123.305196 48.462611 15,-104.609252 38.255001 1433,' '-97.521157 34.464642 380,-96.801611 32.782057 147,-95.363151 29.763374 18,' '-95.23506 38.971823 251,-87.650175 41.850385 181,174.783117 -41.315268 14)' ) ref_union = GEOSGeometry(ref_ewkt) union = City3D.objects.aggregate(Union('point'))['point__union'] self.assertTrue(union.hasz) # Ordering of points in the resulting geometry may vary between implementations self.assertSetEqual({p.ewkt for p in ref_union}, {p.ewkt for p in union}) @skipUnlessDBFeature("supports_3d_functions") @ignore_warnings(category=RemovedInDjango110Warning) def test_extent(self): """ Testing the Extent3D aggregate for 3D models. """ self._load_city_data() # `SELECT ST_Extent3D(point) FROM geo3d_city3d;` ref_extent3d = (-123.305196, -41.315268, 14, 174.783117, 48.462611, 1433) extent1 = City3D.objects.aggregate(Extent3D('point'))['point__extent3d'] extent2 = City3D.objects.extent3d() def check_extent3d(extent3d, tol=6): for ref_val, ext_val in zip(ref_extent3d, extent3d): self.assertAlmostEqual(ref_val, ext_val, tol) for e3d in [extent1, extent2]: check_extent3d(e3d) self.assertIsNone(City3D.objects.none().extent3d()) self.assertIsNone(City3D.objects.none().aggregate(Extent3D('point'))['point__extent3d']) @ignore_warnings(category=RemovedInDjango20Warning) @skipUnlessDBFeature("supports_3d_functions") def test_perimeter(self): """ Testing GeoQuerySet.perimeter() on 3D fields. """ self._load_polygon_data() # Reference query for values below: # `SELECT ST_Perimeter3D(poly), ST_Perimeter2D(poly) FROM geo3d_polygon3d;` ref_perim_3d = 76859.2620451 ref_perim_2d = 76859.2577803 tol = 6 self.assertAlmostEqual(ref_perim_2d, Polygon2D.objects.perimeter().get(name='2D BBox').perimeter.m, tol) self.assertAlmostEqual(ref_perim_3d, Polygon3D.objects.perimeter().get(name='3D BBox').perimeter.m, tol) @ignore_warnings(category=RemovedInDjango20Warning) @skipUnlessDBFeature("supports_3d_functions") def test_length(self): """ Testing GeoQuerySet.length() on 3D fields. """ # ST_Length_Spheroid Z-aware, and thus does not need to use # a separate function internally. # `SELECT ST_Length_Spheroid(line, 'SPHEROID["GRS 1980",6378137,298.257222101]') # FROM geo3d_interstate[2d|3d];` self._load_interstate_data() tol = 3 ref_length_2d = 4368.1721949481 ref_length_3d = 4368.62547052088 self.assertAlmostEqual(ref_length_2d, Interstate2D.objects.length().get(name='I-45').length.m, tol) self.assertAlmostEqual(ref_length_3d, Interstate3D.objects.length().get(name='I-45').length.m, tol) # Making sure `ST_Length3D` is used on for a projected # and 3D model rather than `ST_Length`. 
# `SELECT ST_Length(line) FROM geo3d_interstateproj2d;` ref_length_2d = 4367.71564892392 # `SELECT ST_Length3D(line) FROM geo3d_interstateproj3d;` ref_length_3d = 4368.16897234101 self.assertAlmostEqual(ref_length_2d, InterstateProj2D.objects.length().get(name='I-45').length.m, tol) self.assertAlmostEqual(ref_length_3d, InterstateProj3D.objects.length().get(name='I-45').length.m, tol) @ignore_warnings(category=RemovedInDjango20Warning) @skipUnlessDBFeature("supports_3d_functions") def test_scale(self): """ Testing GeoQuerySet.scale() on Z values. """ self._load_city_data() # Mapping of City name to reference Z values. zscales = (-3, 4, 23) for zscale in zscales: for city in City3D.objects.scale(1.0, 1.0, zscale): self.assertEqual(city_dict[city.name][2] * zscale, city.scale.z) @ignore_warnings(category=RemovedInDjango20Warning) @skipUnlessDBFeature("supports_3d_functions") def test_translate(self): """ Testing GeoQuerySet.translate() on Z values. """ self._load_city_data() ztranslations = (5.23, 23, -17) for ztrans in ztranslations: for city in City3D.objects.translate(0, 0, ztrans): self.assertEqual(city_dict[city.name][2] + ztrans, city.translate.z) @skipUnless(HAS_GDAL, "GDAL is required for Geo3DTest.") @skipUnlessDBFeature("gis_enabled", "supports_3d_functions") class Geo3DFunctionsTests(Geo3DLoadingHelper, TestCase): def test_kml(self): """ Test KML() function with Z values. """ self._load_city_data() h = City3D.objects.annotate(kml=AsKML('point', precision=6)).get(name='Houston') # KML should be 3D. # `SELECT ST_AsKML(point, 6) FROM geo3d_city3d WHERE name = 'Houston';` ref_kml_regex = re.compile(r'^<Point><coordinates>-95.363\d+,29.763\d+,18</coordinates></Point>$') self.assertTrue(ref_kml_regex.match(h.kml)) def test_geojson(self): """ Test GeoJSON() function with Z values. """ self._load_city_data() h = City3D.objects.annotate(geojson=AsGeoJSON('point', precision=6)).get(name='Houston') # GeoJSON should be 3D # `SELECT ST_AsGeoJSON(point, 6) FROM geo3d_city3d WHERE name='Houston';` ref_json_regex = re.compile(r'^{"type":"Point","coordinates":\[-95.363151,29.763374,18(\.0+)?\]}$') self.assertTrue(ref_json_regex.match(h.geojson)) def test_perimeter(self): """ Testing Perimeter() function on 3D fields. """ self._load_polygon_data() # Reference query for values below: # `SELECT ST_Perimeter3D(poly), ST_Perimeter2D(poly) FROM geo3d_polygon3d;` ref_perim_3d = 76859.2620451 ref_perim_2d = 76859.2577803 tol = 6 poly2d = Polygon2D.objects.annotate(perimeter=Perimeter('poly')).get(name='2D BBox') self.assertAlmostEqual(ref_perim_2d, poly2d.perimeter.m, tol) poly3d = Polygon3D.objects.annotate(perimeter=Perimeter('poly')).get(name='3D BBox') self.assertAlmostEqual(ref_perim_3d, poly3d.perimeter.m, tol) def test_length(self): """ Testing Length() function on 3D fields. """ # ST_Length_Spheroid Z-aware, and thus does not need to use # a separate function internally. # `SELECT ST_Length_Spheroid(line, 'SPHEROID["GRS 1980",6378137,298.257222101]') # FROM geo3d_interstate[2d|3d];` self._load_interstate_data() tol = 3 ref_length_2d = 4368.1721949481 ref_length_3d = 4368.62547052088 inter2d = Interstate2D.objects.annotate(length=Length('line')).get(name='I-45') self.assertAlmostEqual(ref_length_2d, inter2d.length.m, tol) inter3d = Interstate3D.objects.annotate(length=Length('line')).get(name='I-45') self.assertAlmostEqual(ref_length_3d, inter3d.length.m, tol) # Making sure `ST_Length3D` is used on for a projected # and 3D model rather than `ST_Length`. 
# `SELECT ST_Length(line) FROM geo3d_interstateproj2d;` ref_length_2d = 4367.71564892392 # `SELECT ST_Length3D(line) FROM geo3d_interstateproj3d;` ref_length_3d = 4368.16897234101 inter2d = InterstateProj2D.objects.annotate(length=Length('line')).get(name='I-45') self.assertAlmostEqual(ref_length_2d, inter2d.length.m, tol) inter3d = InterstateProj3D.objects.annotate(length=Length('line')).get(name='I-45') self.assertAlmostEqual(ref_length_3d, inter3d.length.m, tol) def test_scale(self): """ Testing Scale() function on Z values. """ self._load_city_data() # Mapping of City name to reference Z values. zscales = (-3, 4, 23) for zscale in zscales: for city in City3D.objects.annotate(scale=Scale('point', 1.0, 1.0, zscale)): self.assertEqual(city_dict[city.name][2] * zscale, city.scale.z) def test_translate(self): """ Testing Translate() function on Z values. """ self._load_city_data() ztranslations = (5.23, 23, -17) for ztrans in ztranslations: for city in City3D.objects.annotate(translate=Translate('point', 0, 0, ztrans)): self.assertEqual(city_dict[city.name][2] + ztrans, city.translate.z)
bsd-3-clause
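_load_interstate_data derives the 2D interstate by slicing Z off each vertex of the 3D line. The same slicing works on plain coordinate tuples, shown here without requiring GEOS; the sample vertices are the first three from interstate_data above.

coords_3d = [(-95.3708481, 29.7765870, 11.339),
             (-95.3694580, 29.7787980, 4.536),
             (-95.3690305, 29.7797359, 9.762)]

coords_2d = [c[:2] for c in coords_3d]     # drop Z, keep (x, y), as in l[:2]
z_values = tuple(c[2] for c in coords_3d)  # the values test_3d_hasz compares

print(coords_2d[0])  # (-95.3708481, 29.776587)
print(z_values)      # (11.339, 4.536, 9.762)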
KaranToor/MA450
google-cloud-sdk/platform/gsutil/third_party/boto/boto/cloudsearch2/domain.py
153
21247
# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, dis- # tribute, sublicense, and/or sell copies of the Software, and to permit # persons to whom the Software is furnished to do so, subject to the fol- # lowing conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. # from boto.cloudsearch2.optionstatus import IndexFieldStatus from boto.cloudsearch2.optionstatus import ServicePoliciesStatus from boto.cloudsearch2.optionstatus import ExpressionStatus from boto.cloudsearch2.optionstatus import AvailabilityOptionsStatus from boto.cloudsearch2.optionstatus import ScalingParametersStatus from boto.cloudsearch2.document import DocumentServiceConnection from boto.cloudsearch2.search import SearchConnection def handle_bool(value): if value in [True, 'true', 'True', 'TRUE', 1]: return True return False class Domain(object): """ A Cloudsearch domain. :ivar name: The name of the domain. :ivar id: The internally generated unique identifier for the domain. :ivar created: A boolean which is True if the domain is created. It can take several minutes to initialize a domain when CreateDomain is called. Newly created search domains are returned with a False value for Created until domain creation is complete :ivar deleted: A boolean which is True if the search domain has been deleted. The system must clean up resources dedicated to the search domain when delete is called. Newly deleted search domains are returned from list_domains with a True value for deleted for several minutes until resource cleanup is complete. :ivar processing: True if processing is being done to activate the current domain configuration. :ivar num_searchable_docs: The number of documents that have been submitted to the domain and indexed. :ivar requires_index_documents: True if index_documents needs to be called to activate the current domain configuration. :ivar search_instance_count: The number of search instances that are available to process search requests. :ivar search_instance_type: The instance type that is being used to process search requests. :ivar search_partition_count: The number of partitions across which the search index is spread. """ def __init__(self, layer1, data): """ Constructor - Create a domain object from a layer1 and data params :type layer1: :class:`boto.cloudsearch2.layer1.Layer1` object :param layer1: A :class:`boto.cloudsearch2.layer1.Layer1` object which is used to perform operations on the domain. 
""" self.layer1 = layer1 self.update_from_data(data) def update_from_data(self, data): self.created = data['Created'] self.deleted = data['Deleted'] self.processing = data['Processing'] self.requires_index_documents = data['RequiresIndexDocuments'] self.domain_id = data['DomainId'] self.domain_name = data['DomainName'] self.search_instance_count = data['SearchInstanceCount'] self.search_instance_type = data.get('SearchInstanceType', None) self.search_partition_count = data['SearchPartitionCount'] self._doc_service = data['DocService'] self._service_arn = data['ARN'] self._search_service = data['SearchService'] @property def service_arn(self): return self._service_arn @property def doc_service_endpoint(self): return self._doc_service['Endpoint'] @property def search_service_endpoint(self): return self._search_service['Endpoint'] @property def created(self): return self._created @created.setter def created(self, value): self._created = handle_bool(value) @property def deleted(self): return self._deleted @deleted.setter def deleted(self, value): self._deleted = handle_bool(value) @property def processing(self): return self._processing @processing.setter def processing(self, value): self._processing = handle_bool(value) @property def requires_index_documents(self): return self._requires_index_documents @requires_index_documents.setter def requires_index_documents(self, value): self._requires_index_documents = handle_bool(value) @property def search_partition_count(self): return self._search_partition_count @search_partition_count.setter def search_partition_count(self, value): self._search_partition_count = int(value) @property def search_instance_count(self): return self._search_instance_count @search_instance_count.setter def search_instance_count(self, value): self._search_instance_count = int(value) @property def name(self): return self.domain_name @property def id(self): return self.domain_id def delete(self): """ Delete this domain and all index data associated with it. """ return self.layer1.delete_domain(self.name) def get_analysis_schemes(self): """ Return a list of Analysis Scheme objects. """ return self.layer1.describe_analysis_schemes(self.name) def get_availability_options(self): """ Return a :class:`boto.cloudsearch2.option.AvailabilityOptionsStatus` object representing the currently defined availability options for the domain. :return: OptionsStatus object :rtype: :class:`boto.cloudsearch2.option.AvailabilityOptionsStatus` object """ return AvailabilityOptionsStatus( self, refresh_fn=self.layer1.describe_availability_options, refresh_key=['DescribeAvailabilityOptionsResponse', 'DescribeAvailabilityOptionsResult', 'AvailabilityOptions'], save_fn=self.layer1.update_availability_options) def get_scaling_options(self): """ Return a :class:`boto.cloudsearch2.option.ScalingParametersStatus` object representing the currently defined scaling options for the domain. :return: ScalingParametersStatus object :rtype: :class:`boto.cloudsearch2.option.ScalingParametersStatus` object """ return ScalingParametersStatus( self, refresh_fn=self.layer1.describe_scaling_parameters, refresh_key=['DescribeScalingParametersResponse', 'DescribeScalingParametersResult', 'ScalingParameters'], save_fn=self.layer1.update_scaling_parameters) def get_access_policies(self): """ Return a :class:`boto.cloudsearch2.option.ServicePoliciesStatus` object representing the currently defined access policies for the domain. 
:return: ServicePoliciesStatus object :rtype: :class:`boto.cloudsearch2.option.ServicePoliciesStatus` object """ return ServicePoliciesStatus( self, refresh_fn=self.layer1.describe_service_access_policies, refresh_key=['DescribeServiceAccessPoliciesResponse', 'DescribeServiceAccessPoliciesResult', 'AccessPolicies'], save_fn=self.layer1.update_service_access_policies) def index_documents(self): """ Tells the search domain to start indexing its documents using the latest text processing options and IndexFields. This operation must be invoked to make options whose OptionStatus has OptionState of RequiresIndexDocuments visible in search results. """ self.layer1.index_documents(self.name) def get_index_fields(self, field_names=None): """ Return a list of index fields defined for this domain. :return: list of IndexFieldStatus objects :rtype: list of :class:`boto.cloudsearch2.option.IndexFieldStatus` object """ data = self.layer1.describe_index_fields(self.name, field_names) data = (data['DescribeIndexFieldsResponse'] ['DescribeIndexFieldsResult'] ['IndexFields']) return [IndexFieldStatus(self, d) for d in data] def create_index_field(self, field_name, field_type, default='', facet=False, returnable=False, searchable=False, sortable=False, highlight=False, source_field=None, analysis_scheme=None): """ Defines an ``IndexField``, either replacing an existing definition or creating a new one. :type field_name: string :param field_name: The name of a field in the search index. :type field_type: string :param field_type: The type of field. Valid values are int | double | literal | text | date | latlon | int-array | double-array | literal-array | text-array | date-array :type default: string or int :param default: The default value for the field. If the field is of type ``int`` this should be an integer value. Otherwise, it's a string. :type facet: bool :param facet: A boolean to indicate whether facets are enabled for this field or not. Does not apply to fields of type ``int, int-array, text, text-array``. :type returnable: bool :param returnable: A boolean to indicate whether values of this field can be returned in search results or used in ranking. :type searchable: bool :param searchable: A boolean to indicate whether search is enabled for this field or not. :type sortable: bool :param sortable: A boolean to indicate whether sorting is enabled for this field or not. Does not apply to fields of array types. :type highlight: bool :param highlight: A boolean to indicate whether highlighting is enabled for this field or not. Does not apply to fields of type ``double, int, date, latlon`` :type source_field: list of strings or string :param source_field: For array types, this is the list of fields to treat as the source. For singular types, pass a string only. :type analysis_scheme: string :param analysis_scheme: The analysis scheme to use for this field. 
Only applies to ``text | text-array`` field types :return: IndexFieldStatus objects :rtype: :class:`boto.cloudsearch2.option.IndexFieldStatus` object :raises: BaseException, InternalException, LimitExceededException, InvalidTypeException, ResourceNotFoundException """ index = { 'IndexFieldName': field_name, 'IndexFieldType': field_type } if field_type == 'literal': index['LiteralOptions'] = { 'FacetEnabled': facet, 'ReturnEnabled': returnable, 'SearchEnabled': searchable, 'SortEnabled': sortable } if default: index['LiteralOptions']['DefaultValue'] = default if source_field: index['LiteralOptions']['SourceField'] = source_field elif field_type == 'literal-array': index['LiteralArrayOptions'] = { 'FacetEnabled': facet, 'ReturnEnabled': returnable, 'SearchEnabled': searchable } if default: index['LiteralArrayOptions']['DefaultValue'] = default if source_field: index['LiteralArrayOptions']['SourceFields'] = \ ','.join(source_field) elif field_type == 'int': index['IntOptions'] = { 'DefaultValue': default, 'FacetEnabled': facet, 'ReturnEnabled': returnable, 'SearchEnabled': searchable, 'SortEnabled': sortable } if default: index['IntOptions']['DefaultValue'] = default if source_field: index['IntOptions']['SourceField'] = source_field elif field_type == 'int-array': index['IntArrayOptions'] = { 'FacetEnabled': facet, 'ReturnEnabled': returnable, 'SearchEnabled': searchable } if default: index['IntArrayOptions']['DefaultValue'] = default if source_field: index['IntArrayOptions']['SourceFields'] = \ ','.join(source_field) elif field_type == 'date': index['DateOptions'] = { 'FacetEnabled': facet, 'ReturnEnabled': returnable, 'SearchEnabled': searchable, 'SortEnabled': sortable } if default: index['DateOptions']['DefaultValue'] = default if source_field: index['DateOptions']['SourceField'] = source_field elif field_type == 'date-array': index['DateArrayOptions'] = { 'FacetEnabled': facet, 'ReturnEnabled': returnable, 'SearchEnabled': searchable } if default: index['DateArrayOptions']['DefaultValue'] = default if source_field: index['DateArrayOptions']['SourceFields'] = \ ','.join(source_field) elif field_type == 'double': index['DoubleOptions'] = { 'FacetEnabled': facet, 'ReturnEnabled': returnable, 'SearchEnabled': searchable, 'SortEnabled': sortable } if default: index['DoubleOptions']['DefaultValue'] = default if source_field: index['DoubleOptions']['SourceField'] = source_field elif field_type == 'double-array': index['DoubleArrayOptions'] = { 'FacetEnabled': facet, 'ReturnEnabled': returnable, 'SearchEnabled': searchable } if default: index['DoubleArrayOptions']['DefaultValue'] = default if source_field: index['DoubleArrayOptions']['SourceFields'] = \ ','.join(source_field) elif field_type == 'text': index['TextOptions'] = { 'ReturnEnabled': returnable, 'HighlightEnabled': highlight, 'SortEnabled': sortable } if default: index['TextOptions']['DefaultValue'] = default if source_field: index['TextOptions']['SourceField'] = source_field if analysis_scheme: index['TextOptions']['AnalysisScheme'] = analysis_scheme elif field_type == 'text-array': index['TextArrayOptions'] = { 'ReturnEnabled': returnable, 'HighlightEnabled': highlight } if default: index['TextArrayOptions']['DefaultValue'] = default if source_field: index['TextArrayOptions']['SourceFields'] = \ ','.join(source_field) if analysis_scheme: index['TextArrayOptions']['AnalysisScheme'] = analysis_scheme elif field_type == 'latlon': index['LatLonOptions'] = { 'FacetEnabled': facet, 'ReturnEnabled': returnable, 'SearchEnabled': searchable, 
'SortEnabled': sortable } if default: index['LatLonOptions']['DefaultValue'] = default if source_field: index['LatLonOptions']['SourceField'] = source_field data = self.layer1.define_index_field(self.name, index) data = (data['DefineIndexFieldResponse'] ['DefineIndexFieldResult'] ['IndexField']) return IndexFieldStatus(self, data, self.layer1.describe_index_fields) def get_expressions(self, names=None): """ Return a list of rank expressions defined for this domain. :return: list of ExpressionStatus objects :rtype: list of :class:`boto.cloudsearch2.option.ExpressionStatus` object """ fn = self.layer1.describe_expressions data = fn(self.name, names) data = (data['DescribeExpressionsResponse'] ['DescribeExpressionsResult'] ['Expressions']) return [ExpressionStatus(self, d, fn) for d in data] def create_expression(self, name, value): """ Create a new expression. :type name: string :param name: The name of an expression for processing during a search request. :type value: string :param value: The expression to evaluate for ranking or thresholding while processing a search request. The Expression syntax is based on JavaScript expressions and supports: * Single value, sort enabled numeric fields (int, double, date) * Other expressions * The _score variable, which references a document's relevance score * The _time variable, which references the current epoch time * Integer, floating point, hex, and octal literals * Arithmetic operators: + - * / % * Bitwise operators: | & ^ ~ << >> >>> * Boolean operators (including the ternary operator): && || ! ?: * Comparison operators: < <= == >= > * Mathematical functions: abs ceil exp floor ln log2 log10 logn max min pow sqrt pow * Trigonometric functions: acos acosh asin asinh atan atan2 atanh cos cosh sin sinh tanh tan * The haversin distance function Expressions always return an integer value from 0 to the maximum 64-bit signed integer value (2^63 - 1). Intermediate results are calculated as double-precision floating point values and the return value is rounded to the nearest integer. If the expression is invalid or evaluates to a negative value, it returns 0. If the expression evaluates to a value greater than the maximum, it returns the maximum value. The source data for an Expression can be the name of an IndexField of type int or double, another Expression or the reserved name _score. The _score source is defined to return as a double from 0 to 10.0 (inclusive) to indicate how relevant a document is to the search request, taking into account repetition of search terms in the document and proximity of search terms to each other in each matching IndexField in the document. For more information about using rank expressions to customize ranking, see the Amazon CloudSearch Developer Guide. :return: ExpressionStatus object :rtype: :class:`boto.cloudsearch2.option.ExpressionStatus` object :raises: BaseException, InternalException, LimitExceededException, InvalidTypeException, ResourceNotFoundException """ data = self.layer1.define_expression(self.name, name, value) data = (data['DefineExpressionResponse'] ['DefineExpressionResult'] ['Expression']) return ExpressionStatus(self, data, self.layer1.describe_expressions) def get_document_service(self): return DocumentServiceConnection(domain=self) def get_search_service(self): return SearchConnection(domain=self) def __repr__(self): return '<Domain: %s>' % self.domain_name
apache-2.0
rigdenlab/SIMBAD
i2/SIMBAD_report.py
1
7959
""" SIMBAD_report.py: CCP4 GUI Project This library is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License version 3, modified in accordance with the provisions of the license to address the requirements of UK law. You should have received a copy of the modified GNU Lesser General Public License along with this library. If not, copies may be downloaded from http://www.ccp4.ac.uk/ccp4license.php This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. """ import os import re if __name__ == '__main__': import sys ccp4 = os.environ['CCP4'] sys.path.append(os.path.join(ccp4, 'share', 'ccp4i2', 'report')) sys.path.append(os.path.join(ccp4, 'share', 'ccp4i2', 'core')) sys.path.append(os.path.join(ccp4, 'lib', 'python2.7', 'site-packages')) from lxml import etree as ET from report.CCP4ReportParser import Report from simbad.util import SIMBAD_DIRNAME, SIMBAD_PYRVAPI_SHAREDIR class SIMBAD_report(Report): TASKNAME = 'SIMBAD' RUNNING = True def __init__(self, xmlnode=None, jobInfo={}, **kw): Report.__init__(self, xmlnode=xmlnode, jobInfo=jobInfo, **kw) repdir = os.path.join(jobInfo.get('fileroot', None), SIMBAD_DIRNAME, SIMBAD_PYRVAPI_SHAREDIR) self.get_tables_as_elements(repdir) #print("JMHT WRITING REPORT %s" % self.e1_dict) self.addDiv(style='clear:both;') for e1 in xmlnode: # Process each tab separately if e1.tag == 'tab': self.report_section(e1, self) return def get_tables_as_elements(self, repdir): """Get tables as xmltree elements by parsing task.tsk file and .table files""" try: t1_list = list() with open(os.path.join(repdir, 'task.tsk')) as istream: #print("JMHT CHECKING task.tsk %s\n" % os.path.join(repdir, 'task.tsk')) for s1 in re.findall('<table .+?</table>', istream.read(), re.S): t1 = ET.fromstring(s1) if len(t1): t1_list.append(t1) for f1 in os.listdir(repdir): if f1.endswith('.table'): t1 = ET.parse(os.path.join(repdir, f1)).getroot() if len(t1): t1_list.append(t1) self.e1_dict = dict() for t1 in t1_list: tid = t1.get('id', None) if tid and tid.endswith('-grid'): tags = [t2.tag for t2 in t1] if tags == ['thead', 'tbody']: assert len(t1) == 2 e1 = t1 else: tset = set(tags) tag = tset.pop() assert not tset and tag == 'tr' e1 = ET.Element('table') e1.append(t1) e1.attrib.update(t1.attrib) t1.attrib.clear() t1.tag = 'tbody' for e2 in e1.iter(): e2.attrib.pop('class', None) e1.find('tbody').set('class', 'fancy') self.e1_dict[tid[:-5]] = e1 if len(self.e1_dict.keys()): return True return False except Exception as e: print "EXCEPTION: {0}".format(e) return def report_section(self, e1, r0, sort=False): """ """ elems = list() title = 'Untitled' state = False cou = 0 #print("Processing tag %s id %s\n%s" % (e1.tag, e1.get('id'),ET.tostring(e1))) for e2 in e1: row = e2.get('row', '_') col = e2.get('col', '_') if row.isdigit() : row = int(row) if col.isdigit() : col = int(col) if e2.get('id') or e2.tag == 'text': elems.append([row, col, e2]) if e2.tag == 'table': cou += 1 elif e2.tag == 'name': title = e2.text.strip() elif e2.tag == 'open': state = e2.text.strip() == 'true' if elems: # strip out anything we can't deal with here if any([x in title.lower() for x in ['downloads', 'log files', 'graph']]): return #print "GOT ELEMS ",[g[2].get('id') for g in elems],title r1 = r0.addFold(label=title, initiallyOpen=state) #for row, col, e2 in sorted(grid): if 
sort: elems = sorted(elems) for _,_,e2 in elems: id2 = e2.get('id') #print "PROCESSING ",id2, e2.tag if e2.tag == 'section': self.report_section(e2, r1) elif e2.tag == 'table': if id2 and id2 in self.e1_dict: if id2 == 'mrbump_table': r1.append("The table below details the Molecular Replacement results from MrBUMP") if cou > 1: r1.append(e2.findtext('legend').strip()) r1.append(ET.tostring(self.e1_dict[id2])) elif e2.tag == 'text': for t in e2.itertext(): r1.append(t) else: pass if __name__ == '__main__': # Run with no arguments in the CCP4 job directory (the one that holds the SIMBAD directory) def test2(): import argparse parser = argparse.ArgumentParser( description='test of morda report generator', formatter_class=argparse.ArgumentDefaultsHelpFormatter ) parser.add_argument( '-w', '--wrkdir', help='''a directory, containing the subdirectory report/ generated by rvapi''', default='.', metavar='<dir>' ) parser.add_argument( '-i', '--xml', help='input xml-file generated previously by rvapi', default='program.xml', metavar='<file>' ) parser.add_argument( '-o', '--html', help='output html-file, a report file for i2', default='areport.html', metavar='<file>' ) opt = parser.parse_args() xmlnode = ET.parse(opt.xml).getroot() jobInfo = dict(fileroot=os.path.abspath(opt.wrkdir)) report = SIMBAD_report(xmlnode, jobInfo) if len(report.errReport): print 'ERROR REPORT' print report.errReport.report() htmlbase = 'file://' + \ os.environ['CCP4'] + '/share/ccp4i2/docs/report_files' htmlstr = ET.tostring(report.as_etree(htmlBase=htmlbase)) with open(opt.html, 'w') as ostream: print >> ostream, htmlstr.replace('><', '>\n<') test2() # #from CCP4ReportParser import Report # # class AMPLE_report(Report): # # # Specify which gui task and/or pluginscript this applies to # # TASKNAME = 'AMPLE' # # RUNNING = False # # def __init__(self,xmlnode=None,jobInfo={},jobStatus=None,**kw): # # Report. __init__(self,xmlnode=xmlnode,jobInfo=jobInfo, jobStatus=jobStatus, **kw) # # clearingDiv = self.addDiv(style="clear:both;") # # self.addDefaultReport(self) # # # # def addDefaultReport(self, parent=None): # # if parent is None: parent=self # # if len(self.xmlnode.xpath("LogText")) > 0: # # newFold = parent.addFold(label="Log text", initiallyOpen=True) # # newFold.addPre(text = self.xmlnode.xpath("LogText")[0].text)
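# --- Editor's note: example invocation, not part of the original file. The
# flags come directly from the argparse setup in test2(); the paths are
# placeholders. Requires a CCP4 Python 2 environment with $CCP4 set:
#
#   python SIMBAD_report.py -w <ccp4_job_dir> -i program.xml -o areport.html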
bsd-3-clause
Isendir/brython
www/src/Lib/xml/sax/expatreader.py
870
14659
""" SAX driver for the pyexpat C module. This driver works with pyexpat.__version__ == '2.22'. """ version = "0.20" from xml.sax._exceptions import * from xml.sax.handler import feature_validation, feature_namespaces from xml.sax.handler import feature_namespace_prefixes from xml.sax.handler import feature_external_ges, feature_external_pes from xml.sax.handler import feature_string_interning from xml.sax.handler import property_xml_string, property_interning_dict # xml.parsers.expat does not raise ImportError in Jython import sys if sys.platform[:4] == "java": raise SAXReaderNotAvailable("expat not available in Java", None) del sys try: from xml.parsers import expat except ImportError: raise SAXReaderNotAvailable("expat not supported", None) else: if not hasattr(expat, "ParserCreate"): raise SAXReaderNotAvailable("expat not supported", None) from xml.sax import xmlreader, saxutils, handler AttributesImpl = xmlreader.AttributesImpl AttributesNSImpl = xmlreader.AttributesNSImpl # If we're using a sufficiently recent version of Python, we can use # weak references to avoid cycles between the parser and content # handler, otherwise we'll just have to pretend. try: import _weakref except ImportError: def _mkproxy(o): return o else: import weakref _mkproxy = weakref.proxy del weakref, _weakref # --- ExpatLocator class ExpatLocator(xmlreader.Locator): """Locator for use with the ExpatParser class. This uses a weak reference to the parser object to avoid creating a circular reference between the parser and the content handler. """ def __init__(self, parser): self._ref = _mkproxy(parser) def getColumnNumber(self): parser = self._ref if parser._parser is None: return None return parser._parser.ErrorColumnNumber def getLineNumber(self): parser = self._ref if parser._parser is None: return 1 return parser._parser.ErrorLineNumber def getPublicId(self): parser = self._ref if parser is None: return None return parser._source.getPublicId() def getSystemId(self): parser = self._ref if parser is None: return None return parser._source.getSystemId() # --- ExpatParser class ExpatParser(xmlreader.IncrementalParser, xmlreader.Locator): """SAX driver for the pyexpat C module.""" def __init__(self, namespaceHandling=0, bufsize=2**16-20): xmlreader.IncrementalParser.__init__(self, bufsize) self._source = xmlreader.InputSource() self._parser = None self._namespaces = namespaceHandling self._lex_handler_prop = None self._parsing = 0 self._entity_stack = [] self._external_ges = 1 self._interning = None # XMLReader methods def parse(self, source): "Parse an XML document from a URL or an InputSource." 
source = saxutils.prepare_input_source(source) self._source = source self.reset() self._cont_handler.setDocumentLocator(ExpatLocator(self)) xmlreader.IncrementalParser.parse(self, source) def prepareParser(self, source): if source.getSystemId() is not None: self._parser.SetBase(source.getSystemId()) # Redefined setContentHandler to allow changing handlers during parsing def setContentHandler(self, handler): xmlreader.IncrementalParser.setContentHandler(self, handler) if self._parsing: self._reset_cont_handler() def getFeature(self, name): if name == feature_namespaces: return self._namespaces elif name == feature_string_interning: return self._interning is not None elif name in (feature_validation, feature_external_pes, feature_namespace_prefixes): return 0 elif name == feature_external_ges: return self._external_ges raise SAXNotRecognizedException("Feature '%s' not recognized" % name) def setFeature(self, name, state): if self._parsing: raise SAXNotSupportedException("Cannot set features while parsing") if name == feature_namespaces: self._namespaces = state elif name == feature_external_ges: self._external_ges = state elif name == feature_string_interning: if state: if self._interning is None: self._interning = {} else: self._interning = None elif name == feature_validation: if state: raise SAXNotSupportedException( "expat does not support validation") elif name == feature_external_pes: if state: raise SAXNotSupportedException( "expat does not read external parameter entities") elif name == feature_namespace_prefixes: if state: raise SAXNotSupportedException( "expat does not report namespace prefixes") else: raise SAXNotRecognizedException( "Feature '%s' not recognized" % name) def getProperty(self, name): if name == handler.property_lexical_handler: return self._lex_handler_prop elif name == property_interning_dict: return self._interning elif name == property_xml_string: if self._parser: if hasattr(self._parser, "GetInputContext"): return self._parser.GetInputContext() else: raise SAXNotRecognizedException( "This version of expat does not support getting" " the XML string") else: raise SAXNotSupportedException( "XML string cannot be returned when not parsing") raise SAXNotRecognizedException("Property '%s' not recognized" % name) def setProperty(self, name, value): if name == handler.property_lexical_handler: self._lex_handler_prop = value if self._parsing: self._reset_lex_handler_prop() elif name == property_interning_dict: self._interning = value elif name == property_xml_string: raise SAXNotSupportedException("Property '%s' cannot be set" % name) else: raise SAXNotRecognizedException("Property '%s' not recognized" % name) # IncrementalParser methods def feed(self, data, isFinal = 0): if not self._parsing: self.reset() self._parsing = 1 self._cont_handler.startDocument() try: # The isFinal parameter is internal to the expat reader. # If it is set to true, expat will check validity of the entire # document. When feeding chunks, they are not normally final - # except when invoked from close. self._parser.Parse(data, isFinal) except expat.error as e: exc = SAXParseException(expat.ErrorString(e.code), e, self) # FIXME: when to invoke error()? 
self._err_handler.fatalError(exc) def close(self): if self._entity_stack: # If we are completing an external entity, do nothing here return self.feed("", isFinal = 1) self._cont_handler.endDocument() self._parsing = 0 # break cycle created by expat handlers pointing to our methods self._parser = None bs = self._source.getByteStream() if bs is not None: bs.close() def _reset_cont_handler(self): self._parser.ProcessingInstructionHandler = \ self._cont_handler.processingInstruction self._parser.CharacterDataHandler = self._cont_handler.characters def _reset_lex_handler_prop(self): lex = self._lex_handler_prop parser = self._parser if lex is None: parser.CommentHandler = None parser.StartCdataSectionHandler = None parser.EndCdataSectionHandler = None parser.StartDoctypeDeclHandler = None parser.EndDoctypeDeclHandler = None else: parser.CommentHandler = lex.comment parser.StartCdataSectionHandler = lex.startCDATA parser.EndCdataSectionHandler = lex.endCDATA parser.StartDoctypeDeclHandler = self.start_doctype_decl parser.EndDoctypeDeclHandler = lex.endDTD def reset(self): if self._namespaces: self._parser = expat.ParserCreate(self._source.getEncoding(), " ", intern=self._interning) self._parser.namespace_prefixes = 1 self._parser.StartElementHandler = self.start_element_ns self._parser.EndElementHandler = self.end_element_ns else: self._parser = expat.ParserCreate(self._source.getEncoding(), intern = self._interning) self._parser.StartElementHandler = self.start_element self._parser.EndElementHandler = self.end_element self._reset_cont_handler() self._parser.UnparsedEntityDeclHandler = self.unparsed_entity_decl self._parser.NotationDeclHandler = self.notation_decl self._parser.StartNamespaceDeclHandler = self.start_namespace_decl self._parser.EndNamespaceDeclHandler = self.end_namespace_decl self._decl_handler_prop = None if self._lex_handler_prop: self._reset_lex_handler_prop() # self._parser.DefaultHandler = # self._parser.DefaultHandlerExpand = # self._parser.NotStandaloneHandler = self._parser.ExternalEntityRefHandler = self.external_entity_ref try: self._parser.SkippedEntityHandler = self.skipped_entity_handler except AttributeError: # This pyexpat does not support SkippedEntity pass self._parser.SetParamEntityParsing( expat.XML_PARAM_ENTITY_PARSING_UNLESS_STANDALONE) self._parsing = 0 self._entity_stack = [] # Locator methods def getColumnNumber(self): if self._parser is None: return None return self._parser.ErrorColumnNumber def getLineNumber(self): if self._parser is None: return 1 return self._parser.ErrorLineNumber def getPublicId(self): return self._source.getPublicId() def getSystemId(self): return self._source.getSystemId() # event handlers def start_element(self, name, attrs): self._cont_handler.startElement(name, AttributesImpl(attrs)) def end_element(self, name): self._cont_handler.endElement(name) def start_element_ns(self, name, attrs): pair = name.split() if len(pair) == 1: # no namespace pair = (None, name) elif len(pair) == 3: pair = pair[0], pair[1] else: # default namespace pair = tuple(pair) newattrs = {} qnames = {} for (aname, value) in attrs.items(): parts = aname.split() length = len(parts) if length == 1: # no namespace qname = aname apair = (None, aname) elif length == 3: qname = "%s:%s" % (parts[2], parts[1]) apair = parts[0], parts[1] else: # default namespace qname = parts[1] apair = tuple(parts) newattrs[apair] = value qnames[apair] = qname self._cont_handler.startElementNS(pair, None, AttributesNSImpl(newattrs, qnames)) def end_element_ns(self, name): pair = 
name.split() if len(pair) == 1: pair = (None, name) elif len(pair) == 3: pair = pair[0], pair[1] else: pair = tuple(pair) self._cont_handler.endElementNS(pair, None) # this is not used (call directly to ContentHandler) def processing_instruction(self, target, data): self._cont_handler.processingInstruction(target, data) # this is not used (call directly to ContentHandler) def character_data(self, data): self._cont_handler.characters(data) def start_namespace_decl(self, prefix, uri): self._cont_handler.startPrefixMapping(prefix, uri) def end_namespace_decl(self, prefix): self._cont_handler.endPrefixMapping(prefix) def start_doctype_decl(self, name, sysid, pubid, has_internal_subset): self._lex_handler_prop.startDTD(name, pubid, sysid) def unparsed_entity_decl(self, name, base, sysid, pubid, notation_name): self._dtd_handler.unparsedEntityDecl(name, pubid, sysid, notation_name) def notation_decl(self, name, base, sysid, pubid): self._dtd_handler.notationDecl(name, pubid, sysid) def external_entity_ref(self, context, base, sysid, pubid): if not self._external_ges: return 1 source = self._ent_handler.resolveEntity(pubid, sysid) source = saxutils.prepare_input_source(source, self._source.getSystemId() or "") self._entity_stack.append((self._parser, self._source)) self._parser = self._parser.ExternalEntityParserCreate(context) self._source = source try: xmlreader.IncrementalParser.parse(self, source) except: return 0 # FIXME: save error info here? (self._parser, self._source) = self._entity_stack[-1] del self._entity_stack[-1] return 1 def skipped_entity_handler(self, name, is_pe): if is_pe: # The SAX spec requires to report skipped PEs with a '%' name = '%'+name self._cont_handler.skippedEntity(name) # --- def create_parser(*args, **kwargs): return ExpatParser(*args, **kwargs) # --- if __name__ == "__main__": import xml.sax.saxutils p = create_parser() p.setContentHandler(xml.sax.saxutils.XMLGenerator()) p.setErrorHandler(xml.sax.ErrorHandler()) p.parse("http://www.ibiblio.org/xml/examples/shakespeare/hamlet.xml")
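# --- Editor's note: hedged usage sketch, not part of the original module.
# It shows the IncrementalParser contract implemented above: feed() pushes
# chunks to expat as they arrive, and close() flushes the final buffer and
# fires endDocument(). The XML payload and handler choice are illustrative.
def _example_incremental_parse():
    import xml.sax.saxutils
    p = create_parser()
    # Echo the SAX events back out as XML on stdout.
    p.setContentHandler(xml.sax.saxutils.XMLGenerator())
    for chunk in ("<root>", "<item>hi</item>", "</root>"):
        p.feed(chunk)
    p.close()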
bsd-3-clause
ericlink/adms-server
playframework-dist/play-1.1/python/Lib/compiler/syntax.py
25
1490
"""Check for errs in the AST. The Python parser does not catch all syntax errors. Others, like assignments with invalid targets, are caught in the code generation phase. The compiler package catches some errors in the transformer module. But it seems clearer to write checkers that use the AST to detect errors. """ from compiler import ast, walk def check(tree, multi=None): v = SyntaxErrorChecker(multi) walk(tree, v) return v.errors class SyntaxErrorChecker: """A visitor to find syntax errors in the AST.""" def __init__(self, multi=None): """Create new visitor object. If optional argument multi is not None, then print messages for each error rather than raising a SyntaxError for the first. """ self.multi = multi self.errors = 0 def error(self, node, msg): self.errors = self.errors + 1 if self.multi is not None: print "%s:%s: %s" % (node.filename, node.lineno, msg) else: raise SyntaxError, "%s (%s:%s)" % (msg, node.filename, node.lineno) def visitAssign(self, node): # the transformer module handles many of these pass ## for target in node.nodes: ## if isinstance(target, ast.AssList): ## if target.lineno is None: ## target.lineno = node.lineno ## self.error(target, "can't assign to list comprehension")
mit
ESOedX/edx-platform
openedx/core/djangoapps/enrollments/errors.py
75
1335
"""All Error Types pertaining to Enrollment.""" class CourseEnrollmentError(Exception): """Generic Course Enrollment Error. Describes any error that may occur when reading or updating enrollment information for a user or a course. """ def __init__(self, msg, data=None): super(CourseEnrollmentError, self).__init__(msg) # Corresponding information to help resolve the error. self.data = data class UserNotFoundError(CourseEnrollmentError): pass class CourseEnrollmentClosedError(CourseEnrollmentError): pass class CourseEnrollmentFullError(CourseEnrollmentError): pass class CourseEnrollmentExistsError(CourseEnrollmentError): enrollment = None def __init__(self, message, enrollment): super(CourseEnrollmentExistsError, self).__init__(message) self.enrollment = enrollment class CourseModeNotFoundError(CourseEnrollmentError): """The requested course mode could not be found.""" pass class EnrollmentNotFoundError(CourseEnrollmentError): """The requested enrollment could not be found.""" pass class EnrollmentApiLoadError(CourseEnrollmentError): """The data API could not be loaded.""" pass class InvalidEnrollmentAttribute(CourseEnrollmentError): """Enrollment Attributes could not be validated""" pass
agpl-3.0
DmitryYurov/BornAgain
Tests/Functional/Python/PyCore/transform_cube.py
2
7597
""" Test of rotation/positioning of simple cubic particle. Original particle is compared with the one obtained """ from __future__ import print_function import os, sys, unittest import utils from libBornAgainCore import * class RotationsCubeTest(unittest.TestCase): """ Test of rotations and translations of simple cube in three layers system """ def get_sample(self, formfactor, rot = None, pos = None, layout_rot = None, layout_pos = None, add_to="air"): mAmbience = HomogeneousMaterial("Air", 0.0, 0.0) mParticle = HomogeneousMaterial("Particle", 6e-4, 2e-8) mMiddle= HomogeneousMaterial("MidleLayer", 5e-5, 2e-8) mSubstrate = HomogeneousMaterial("Substrate", 6e-6, 2e-8) particle = Particle(mParticle, formfactor) if pos: particle.setPosition(pos) if rot: particle.setRotation(rot) layout = ParticleLayout() if layout_rot and layout_pos: layout.addParticle(particle, 1.0, layout_pos, layout_rot) elif layout_rot and not layout_pos: layout.addParticle(particle, 1.0, kvector_t(0, 0, 0), layout_rot) elif not layout_rot and layout_pos: layout.addParticle(particle, 1.0, layout_pos) else: layout.addParticle(particle) air_layer = Layer(mAmbience) middle_layer = Layer(mSubstrate, 50.0) substrate = Layer(mSubstrate) if add_to == "air": air_layer.addLayout(layout) else: middle_layer.addLayout(layout) multi_layer = MultiLayer() multi_layer.addLayer(air_layer) multi_layer.addLayer(middle_layer) multi_layer.addLayer(substrate) return multi_layer def get_result(self, data, add_to="air"): ff = data[0] rot = data[1] pos = data[2] layout_rot = data[3] layout_pos = data[4] sample = self.get_sample(ff, rot, pos, layout_rot, layout_pos, add_to) # simulation = self.get_simulation(sample) simulation = utils.get_simulation_MiniGISAS(sample) simulation.runSimulation() return simulation.result() def get_difference(self, reference_data, test_data, add_to="air"): intensity = self.get_result(test_data, add_to) return RelativeDifference(reference_data, intensity) def testRotationZ(self): """ Cube is Z-rotated either through setRotation method or through particle layout. The result is compared with unrotated cube. """ box = FormFactorBox(10, 10, 10) data_to_test = [ # ff rot pos layout_rot layout_pos (box, None, None, None, None), # reference (box, RotationZ(90.*degree), None, None, None), # rotating particle (box, RotationZ(-90.*degree), None, None, None), (box, RotationZ(180.*degree), None, None, None), (box, None, None, RotationZ(90.*degree), None), # rotating through layout (box, RotationZ(45.*degree), None, RotationZ(45.*degree), None), # cumulative rotation ] reference_data = self.get_result(data_to_test[0]) isSuccess = True for i in range(1, len(data_to_test)): diff = self.get_difference(reference_data, data_to_test[i]) print("{0} #{1} diff {2:.2e}".format(self.testRotationZ.__name__, i, diff)) if(diff > 1e-10) : isSuccess=False self.assertTrue(isSuccess) def testRotationY(self): """ Cube is Y-rotated either through setRotation method or through particle layout. Additional translation is applied if necessary. The result is compared with unrotated cube. 
""" box = FormFactorBox(10, 10, 10) data_to_test = [ # ff rot pos layout_rot layout_pos (box, None, None, None, None), # reference (box, RotationY(90.*degree), kvector_t(0,0,5.0), None, None), # rotating and translating (box, None, None, RotationY(90.*degree), kvector_t(0,0,5.0)), # rotating and translating (box, RotationY(90.*degree), None, None, kvector_t(0,0,5.0)), # rotating and translating (box, RotationY(45.*degree), kvector_t(0,0,0.0), RotationY(45.*degree), kvector_t(0,0,5.0)), # rotating and translating ] reference_data = self.get_result(data_to_test[0]) isSuccess = True for i in range(1, len(data_to_test)): diff = self.get_difference(reference_data, data_to_test[i]) print("{0} #{1} diff {2:.2e}".format(self.testRotationY.__name__, i, diff)) if(diff > 1e-10) : isSuccess=False self.assertTrue(isSuccess) def testRotationX(self): """ Cube is Z-rotated either through setRotation method or through particle layout. Additional translation is applied if necessary. The result is compared with unrotated cube. """ box = FormFactorBox(10, 10, 10) data_to_test = [ # ff rot pos layout_rot layout_pos (box, None, None, None, None), # reference (box, RotationX(90.*degree), kvector_t(0,0,5.0), None, None), # rotating and translating (box, None, None, RotationX(90.*degree), kvector_t(0,0,5.0)), # rotating and translating (box, RotationX(90.*degree), None, None, kvector_t(0,0,5.0)), # rotating and translating (box, RotationX(45.*degree), kvector_t(0,0,0.0), RotationX(45.*degree), kvector_t(0,0,5.0)), # rotating and translating ] reference_data = self.get_result(data_to_test[0]) isSuccess = True for i in range(1, len(data_to_test)): diff = self.get_difference(reference_data, data_to_test[i]) print("{0} #{1} diff {2:.2e}".format(self.testRotationX.__name__, i, diff)) if(diff > 1e-10) : isSuccess=False self.assertTrue(isSuccess) def testRotationsInMiddleLayer(self): """ """ box = FormFactorBox(10, 10, 10) data_to_test = [ # ff rot pos layout_rot layout_pos (box, None, kvector_t(0,0,-25.0), None, None), # reference (box, RotationX(90.*degree), kvector_t(0,0,-20.0), None, None), # rotating and translating ] reference_data = self.get_result(data_to_test[0], "add_to_middle") isSuccess = True for i in range(1, len(data_to_test)): diff = self.get_difference(reference_data, data_to_test[i], "add_to_middle") print("{0} #{1} diff {2:.2e}".format(self.testRotationX.__name__, i, diff)) if(diff > 1e-10) : isSuccess=False self.assertTrue(isSuccess) if __name__ == '__main__': unittest.main()
gpl-3.0
markYoungH/chromium.src
components/crash/tools/dmp2minidump.py
129
1353
#!/usr/bin/env python # Copyright 2013 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """A tool to extract minidumps from dmp crash dumps.""" import os import sys from cgi import parse_multipart def ProcessDump(dump_file, minidump_file): """Extracts the part of the dump file that minidump_stackwalk can read. The dump files generated by the breakpad integration multi-part form data that include the minidump as file attachment. Args: dump_file: the dump file that needs to be processed. minidump_file: the file to write the minidump to. """ try: dump = open(dump_file, 'rb') boundary = dump.readline().strip()[2:] data = parse_multipart(dump, {'boundary': boundary}) except: print 'Failed to read dmp file %s' % dump_file return if not 'upload_file_minidump' in data: print 'Could not find minidump file in dump.' return f = open(minidump_file, 'w') f.write("\r\n".join(data['upload_file_minidump'])) f.close() def main(): if len(sys.argv) != 3: print 'Usage: %s [dmp file] [minidump]' % sys.argv[0] print '' print 'Extracts the minidump stored in the crash dump file' return 1 ProcessDump(sys.argv[1], sys.argv[2]) if '__main__' == __name__: sys.exit(main())
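# --- Editor's note: example invocations, not part of the original tool;
# the file names are placeholders for any breakpad multipart .dmp file.
#
#   python dmp2minidump.py crash.dmp crash.minidump
#
# or, calling the extraction routine directly from Python:
def _example_extract(dump_path='crash.dmp', minidump_path='crash.minidump'):
    ProcessDump(dump_path, minidump_path)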
bsd-3-clause
hdinsight/hue
desktop/core/ext-py/boto-2.38.0/boto/beanstalk/wrapper.py
153
1078
"""Wraps layer1 api methods and converts layer1 dict responses to objects.""" from boto.beanstalk.layer1 import Layer1 import boto.beanstalk.response from boto.exception import BotoServerError import boto.beanstalk.exception as exception def beanstalk_wrapper(func, name): def _wrapped_low_level_api(*args, **kwargs): try: response = func(*args, **kwargs) except BotoServerError as e: raise exception.simple(e) # Turn 'this_is_a_function_name' into 'ThisIsAFunctionNameResponse'. cls_name = ''.join([part.capitalize() for part in name.split('_')]) + 'Response' cls = getattr(boto.beanstalk.response, cls_name) return cls(response) return _wrapped_low_level_api class Layer1Wrapper(object): def __init__(self, *args, **kwargs): self.api = Layer1(*args, **kwargs) def __getattr__(self, name): try: return beanstalk_wrapper(getattr(self.api, name), name) except AttributeError: raise AttributeError("%s has no attribute %r" % (self, name))
apache-2.0
Jasoning/namebench
nb_third_party/graphy/line_chart.py
205
4253
#!/usr/bin/python2.4 # # Copyright 2008 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Code related to line charts.""" import copy import warnings from graphy import common class LineStyle(object): """Represents the style for a line on a line chart. Also provides some convenient presets. Object attributes (Passed directly to the Google Chart API. Check there for details): width: Width of the line on: Length of a line segment (for dashed/dotted lines) off: Length of a break (for dashed/dotted lines) color: Color of the line. A hex string, like 'ff0000' for red. Optional, AutoColor will fill this in for you automatically if empty. Some common styles, such as LineStyle.dashed, are available: LineStyle.solid() LineStyle.dashed() LineStyle.dotted() LineStyle.thick_solid() LineStyle.thick_dashed() LineStyle.thick_dotted() """ # Widths THIN = 1 THICK = 2 # Patterns # ((on, off) tuples, as passed to LineChart.AddLine) SOLID = (1, 0) DASHED = (8, 4) DOTTED = (2, 4) def __init__(self, width, on, off, color=None): """Construct a LineStyle. See class docstring for details on args.""" self.width = width self.on = on self.off = off self.color = color @classmethod def solid(cls): return LineStyle(1, 1, 0) @classmethod def dashed(cls): return LineStyle(1, 8, 4) @classmethod def dotted(cls): return LineStyle(1, 2, 4) @classmethod def thick_solid(cls): return LineStyle(2, 1, 0) @classmethod def thick_dashed(cls): return LineStyle(2, 8, 4) @classmethod def thick_dotted(cls): return LineStyle(2, 2, 4) class LineChart(common.BaseChart): """Represents a line chart.""" def __init__(self, points=None): super(LineChart, self).__init__() if points is not None: self.AddLine(points) def AddLine(self, points, label=None, color=None, pattern=LineStyle.SOLID, width=LineStyle.THIN, markers=None): """Add a new line to the chart. This is a convenience method which constructs the DataSeries and appends it for you. It returns the new series. points: List of equally-spaced y-values for the line label: Name of the line (used for the legend) color: Hex string, like 'ff0000' for red pattern: Tuple for (length of segment, length of gap). i.e. LineStyle.DASHED width: Width of the line (i.e. LineStyle.THIN) markers: List of Marker objects to attach to this line (see DataSeries for more info) """ if color is not None and isinstance(color[0], common.Marker): warnings.warn('Your code may be broken! ' 'You passed a list of Markers instead of a color. The ' 'old argument order (markers before color) is deprecated.', DeprecationWarning, stacklevel=2) style = LineStyle(width, pattern[0], pattern[1], color=color) series = common.DataSeries(points, label=label, style=style, markers=markers) self.data.append(series) return series def AddSeries(self, points, color=None, style=LineStyle.solid, markers=None, label=None): """DEPRECATED""" warnings.warn('LineChart.AddSeries is deprecated. Call AddLine instead. 
', DeprecationWarning, stacklevel=2) return self.AddLine(points, color=color, width=style.width, pattern=(style.on, style.off), markers=markers, label=label) class Sparkline(LineChart): """Represent a sparkline. These behave like LineCharts, mostly, but come without axes. """
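# --- Editor's note: hedged usage sketch, not part of the original module.
# It builds a two-line chart with the AddLine API defined above; the data
# points and labels are illustrative.
def _example_line_chart():
    chart = LineChart()
    chart.AddLine([1, 2, 3, 5, 8], label='growth',
                  pattern=LineStyle.DASHED, width=LineStyle.THICK)
    chart.AddLine([5, 4, 3, 2, 1], label='decay')
    return chart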
apache-2.0
kiran/bart-sign
venv/lib/python2.7/site-packages/untangle.py
2
4047
#!/usr/bin/env python """ untangle Converts xml to python objects. The only method you need to call is parse() Partially inspired by xml2obj (http://code.activestate.com/recipes/149368-xml2obj/) Author: Christian Stefanescu (http://0chris.com) License: MIT License - http://www.opensource.org/licenses/mit-license.php """ import os from xml.sax import make_parser, handler try: from StringIO import StringIO except ImportError: from io import StringIO __version__ = '1.1.0' class Element(): """ Representation of an XML element. """ def __init__(self, name, attributes): self._name = name self._attributes = attributes self.children = [] self.is_root = False self.cdata = '' def add_child(self, element): self.children.append(element) def add_cdata(self, cdata): self.cdata = self.cdata + cdata def get_attribute(self, key): return self._attributes.get(key) def get_elements(self, name=None): if name: return [e for e in self.children if e._name == name] else: return self.children def __getitem__(self, key): return self.get_attribute(key) def __getattr__(self, key): matching_children = [x for x in self.children if x._name == key] if matching_children: if len(matching_children) == 1: self.__dict__[key] = matching_children[0] return matching_children[0] else: self.__dict__[key] = matching_children return matching_children else: raise IndexError('Unknown key <%s>' % key) def __iter__(self): yield self def __str__(self): return ( "Element <%s> with attributes %s and children %s" % (self._name, self._attributes, self.children) ) def __repr__(self): return ( "Element(name = %s, attributes = %s, cdata = %s)" % (self._name, self._attributes, self.cdata) ) def __nonzero__(self): return self.is_root or self._name is not None def __eq__(self, val): return self.cdata == val def __dir__(self): children_names = [x._name for x in self.children] return children_names class Handler(handler.ContentHandler): """ SAX handler which creates the Python object structure out of ``Element``s """ def __init__(self): self.root = Element(None, None) self.root.is_root = True self.elements = [] def startElement(self, name, attributes): name = name.replace('-', '_') name = name.replace('.', '_') name = name.replace(':', '_') attrs = dict() for k, v in attributes.items(): attrs[k] = v element = Element(name, attrs) if len(self.elements) > 0: self.elements[-1].add_child(element) else: self.root.add_child(element) self.elements.append(element) def endElement(self, name): self.elements.pop() def characters(self, cdata): self.elements[-1].add_cdata(cdata) def parse(filename): """ Interprets the given string as a filename, URL or XML data string, parses it and returns a Python object which represents the given document. Raises ``ValueError`` if the argument is None / empty string. Raises ``xml.sax.SAXParseException`` if something goes wrong during parsing.s """ if filename is None or filename.strip() == '': raise ValueError('parse() takes a filename, URL or XML string') parser = make_parser() sax_handler = Handler() parser.setContentHandler(sax_handler) if os.path.exists(filename) or is_url(filename): parser.parse(filename) else: parser.parse(StringIO(filename)) return sax_handler.root def is_url(string): return string.startswith('http://') or string.startswith('https://') # vim: set expandtab ts=4 sw=4:
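# --- Editor's note: hedged usage sketch, not part of the original module.
# parse() accepts a filename, a URL or, as here, a raw XML string; the
# document is illustrative.
def _example_untangle():
    doc = parse('<root><child name="a"/><child name="b"/></root>')
    # Attribute access walks child elements; [] reads XML attributes.
    return [c['name'] for c in doc.root.get_elements('child')]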
mit
auferack08/edx-platform
cms/djangoapps/contentstore/tests/test_permissions.py
28
5759
""" Test CRUD for authorization. """ import copy from django.contrib.auth.models import User from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase from contentstore.tests.utils import AjaxEnabledTestClient from opaque_keys.edx.locations import SlashSeparatedCourseKey from contentstore.utils import reverse_url, reverse_course_url from student.roles import CourseInstructorRole, CourseStaffRole, OrgStaffRole, OrgInstructorRole from contentstore.views.access import has_course_access from student import auth class TestCourseAccess(ModuleStoreTestCase): """ Course-based access (as opposed to access of a non-course xblock) """ def setUp(self): """ Create a staff user and log them in (creating the client). Create a pool of users w/o granting them any permissions """ user_password = super(TestCourseAccess, self).setUp() self.client = AjaxEnabledTestClient() self.client.login(username=self.user.username, password=user_password) # create a course via the view handler which has a different strategy for permissions than the factory self.course_key = SlashSeparatedCourseKey('myu', 'mydept.mycourse', 'myrun') course_url = reverse_url('course_handler') self.client.ajax_post(course_url, { 'org': self.course_key.org, 'number': self.course_key.course, 'display_name': 'My favorite course', 'run': self.course_key.run, } ) self.users = self._create_users() def _create_users(self): """ Create 8 users and return them """ users = [] for i in range(8): username = "user{}".format(i) email = "test+user{}@edx.org".format(i) user = User.objects.create_user(username, email, 'foo') user.is_active = True user.save() users.append(user) return users def tearDown(self): """ Reverse the setup """ self.client.logout() ModuleStoreTestCase.tearDown(self) def test_get_all_users(self): """ Test getting all authors for a course where their permissions run the gamut of allowed group types. """ # first check the course creator.has explicit access (don't use has_access as is_staff # will trump the actual test) self.assertTrue( CourseInstructorRole(self.course_key).has_user(self.user), "Didn't add creator as instructor." 
) users = copy.copy(self.users) # doesn't use role.users_with_role b/c it's verifying the roles.py behavior user_by_role = {} # add the misc users to the course in different groups for role in [CourseInstructorRole, CourseStaffRole, OrgStaffRole, OrgInstructorRole]: user_by_role[role] = [] # Org-based roles are created via org name, rather than course_key if (role is OrgStaffRole) or (role is OrgInstructorRole): group = role(self.course_key.org) else: group = role(self.course_key) # NOTE: this loop breaks the roles.py abstraction by purposely assigning # users to one of each possible groupname in order to test that has_course_access # and remove_user work user = users.pop() group.add_users(user) user_by_role[role].append(user) self.assertTrue(has_course_access(user, self.course_key), "{} does not have access".format(user)) course_team_url = reverse_course_url('course_team_handler', self.course_key) response = self.client.get_html(course_team_url) for role in [CourseInstructorRole, CourseStaffRole]: # Global and org-based roles don't appear on this page for user in user_by_role[role]: self.assertContains(response, user.email) # test copying course permissions copy_course_key = SlashSeparatedCourseKey('copyu', 'copydept.mycourse', 'myrun') for role in [CourseInstructorRole, CourseStaffRole, OrgStaffRole, OrgInstructorRole]: if (role is OrgStaffRole) or (role is OrgInstructorRole): auth.add_users( self.user, role(copy_course_key.org), *role(self.course_key.org).users_with_role() ) else: auth.add_users( self.user, role(copy_course_key), *role(self.course_key).users_with_role() ) # verify access in copy course and verify that removal from source course w/ the various # groupnames works for role in [CourseInstructorRole, CourseStaffRole, OrgStaffRole, OrgInstructorRole]: for user in user_by_role[role]: # forcefully decache the groups: premise is that any real request will not have # multiple objects repr the same user but this test somehow uses different instance # in above add_users call if hasattr(user, '_roles'): del user._roles self.assertTrue(has_course_access(user, copy_course_key), "{} no copy access".format(user)) if (role is OrgStaffRole) or (role is OrgInstructorRole): auth.remove_users(self.user, role(self.course_key.org), user) else: auth.remove_users(self.user, role(self.course_key), user) self.assertFalse(has_course_access(user, self.course_key), "{} remove didn't work".format(user))
agpl-3.0
X-DataInitiative/tick
tick/survival/simu_sccs.py
2
26354
# License: BSD 3 clause from operator import itemgetter import numpy as np import scipy.sparse as sps from scipy.sparse import csr_matrix from tick.base.simulation import Simu from tick.hawkes import SimuHawkesExpKernels, SimuHawkesMulti from tick.preprocessing import LongitudinalFeaturesLagger from itertools import permutations from copy import deepcopy from scipy.stats import beta, norm class SimuSCCS(Simu): """Simulation of a Self Control Case Series (SCCS) model. This simulator can produce exposure (features), outcomes (labels) and censoring data. The features matrices are a `n_cases` list of numpy arrays (dense case) or csr_matrices (sparse case) of shape `(n_intervals, n_features)` containing exposures to each feature. Exposure can take two forms: - short repeated exposures (`single_exposure`): in that case, each column of the numpy arrays or csr matrices can contain multiple ones, each one representing an exposure for a particular time bucket. - infinite unique exposures (`multiple_exposure`): in that case, each column of the numpy arrays or csr matrices can only contain a single one, corresponding to the starting date of the exposure. Parameters ---------- n_cases : `int` Number of cases to generate. A case is a sample who experience at least one adverse event. n_intervals : `int` Number of time intervals used to generate features and outcomes. n_features : `int` Number of features to simulate for each case. n_lags : `numpy.ndarray`, shape=(n_features,), dtype="uint64" Number of lags per feature. The model will regress labels on the last observed values of the features over their corresponding `n_lags` time intervals. `n_lags` values must be between 0 and `n_intervals` - 1. exposure_type : {'single_exposure', 'multiple_exposure'}, default='single_exposure' Either 'single_exposure' for infinite unique exposures or 'multiple_exposure' for short repeated exposures. distribution : {'multinomial', 'poisson'}, default='multinomial' Distribution used to generate the outcomes. In the 'multinomial' case, the Poisson process used to generate the events is conditioned by total the number event per sample, which is set to be equal to one. In that case, the simulation matches exactly the SCCS model hypotheses. In the 'poisson' case, the outcomes are generated from a Poisson process, which can result in more than one outcome tick per sample. In this case, the first event is kept, and the other are discarded. sparse : `boolean`, default=True Generate sparse or dense features. censoring_prob : `float`, default=0. Probability that a sample is censored. Should be in [0, 1]. If 0, no censoring is applied. When > 0, SimuSCCS simulates a censoring vector. In that case, the features and outcomes are simulated, then right-censored according to the simulated censoring dates. censoring_scale : `float`, default=None The number of censored time intervals are drawn from a Poisson distribution with intensity equal to `censoring_scale`. The higher, the more intervals will be censored. If None, no censoring is applied. coeffs : `list` containing `numpy.ndarray`, default=None Can be used to provide your own set of coefficients. Element `i` of the list should be a 1-d `numpy.ndarray` of shape (n_lags + 1), where `n_lags[i]` is the number of lags associated to feature `i`. If set to None, the simulator will generate coefficients randomly. hawkes_exp_kernels : `SimuHawkesExpKernels`, default=None Features are simulated with exponential kernel Hawkes processes. 
This parameter can be used to specify your own kernels (see `SimuHawkesExpKernels` documentation). If None, random kernels are generated. The same kernels are used to generate features for the whole generated population. n_correlations : `int`, default=0 If `hawkes_exp_kernels` is None, random kernels are generated. This parameter controls the number of non-null non-diagonal kernels. batch_size : `int`, default=None When generating outcomes with Poisson distribution, the simulator will discard samples to which no event has occurred. In this case, the simulator generate successive batches of samples, until it reaches a total of n_samples. This parameter can be used to set the batch size. seed : `int`, default=None The seed of the random number generator verbose : `bool`, default=True If True, print things Examples -------- >>> import numpy as np >>> from tick.survival import SimuSCCS >>> n_lags = np.repeat(2, 2).astype('uint64') >>> sim = SimuSCCS(n_cases=5, n_intervals=3, n_features=2, n_lags=n_lags, ... seed=42, sparse=False, exposure_type="multiple_exposures", ... verbose=False) >>> features, labels, outcomes, censoring, _coeffs = sim.simulate() >>> print(features) [array([[0., 0.], [1., 0.], [1., 1.]]), array([[1., 0.], [1., 0.], [1., 1.]]), array([[1., 1.], [1., 1.], [1., 1.]]), array([[0., 0.], [1., 1.], [1., 0.]]), array([[1., 0.], [0., 0.], [0., 0.]])] >>> print(censoring) [3 3 3 3 3] >>> print(_coeffs) [array([ 0.54738557, -0.15109073, 0.71345739]), array([ 1.67633284, -0.25656871, -0.25655065])] """ _const_attr = [ # user defined parameters '_exposure_type', '_outcome_distribution', '_censoring_prob', '_censoring_scale', # redundant with prob ? '_batch_size', '_distribution', '_n_lags', # user defined or computed attributes '_hawkes_exp_kernel', '_coeffs', '_time_drift', '_features_offset' ] _attrinfos = {key: {'writable': False} for key in _const_attr} _attrinfos['hawkes_obj'] = {'writable': True} def __init__( self, n_cases, n_intervals, n_features, n_lags, time_drift=None, exposure_type="single_exposure", distribution="multinomial", sparse=True, censoring_prob=0, censoring_scale=None, coeffs=None, hawkes_exp_kernels=None, n_correlations=0, batch_size=None, seed=None, verbose=True, ): super(SimuSCCS, self).__init__(seed, verbose) self.n_cases = n_cases self.n_intervals = n_intervals self.n_features = n_features self._features_offset = None self._n_lags = None self.n_lags = n_lags self.sparse = sparse self.hawkes_obj = None # attributes with restricted value range self._exposure_type = None self.exposure_type = exposure_type self._distribution = None self.distribution = distribution self._censoring_prob = 0 self.censoring_prob = censoring_prob self._censoring_scale = None self.censoring_scale = censoring_scale if censoring_scale \ else n_intervals / 4 self._coeffs = None self.coeffs = coeffs self._batch_size = None self.batch_size = batch_size # TODO later: add properties for these parameters self.n_correlations = n_correlations self.hawkes_exp_kernels = hawkes_exp_kernels self.time_drift = time_drift # function(t), used only for the paper, allow to add a baseline # TODO: make a property from this baseline def simulate(self): """ Launch simulation of the data. Returns ------- features : `list` of `numpy.ndarray` or `list` of `scipy.sparse.csr_matrix`, list of length n_cases, each element of the list of shape=(n_intervals, n_features) The list of features matrices. 
labels : `list` of `numpy.ndarray`, list of length n_cases, each element of the list of shape=(n_intervals,) The labels vector censoring : `numpy.ndarray`, shape=(n_cases,), dtype="uint64" The censoring data. This array should contain integers in [1, n_intervals]. If the value i is equal to n_intervals, then there is no censoring for sample i. If censoring = c < n_intervals, then the observation of sample i is stopped at interval c, that is, the row c - 1 of the corresponding matrix. The last n_intervals - c rows are then set to 0. _coeffs : `numpy.ndarray`, shape=(n_features * (n_lags + 1),) The coefficients used to simulate the data. """ return Simu.simulate(self) def _simulate(self): """ Loop to generate batches of samples until n_cases is reached. """ n_lagged_features = int(self.n_lags.sum() + self.n_features) n_cases = self.n_cases if self._coeffs is None: self._set('_coeffs', np.random.normal(1e-3, 1.1, n_lagged_features)) features = [] censored_features = [] outcomes = [] censoring = np.zeros((n_cases,), dtype="uint64") cases_count = 0 while cases_count < n_cases: _features, _censored_features, _outcomes, _censoring, _n_samples = \ self._simulate_batch() n_new_cases = _n_samples c = cases_count cases_count += n_new_cases n = n_cases - c if cases_count >= n_cases else n_new_cases features.extend(_features[0:n]) censored_features.extend(_censored_features[0:n]) outcomes.extend(_outcomes[0:n]) censoring[c:c + n] = _censoring[0:n] return features, censored_features, outcomes, censoring, self.coeffs def _simulate_batch(self): """Simulate a batch of samples, each of which have ticked at least once. """ _features, _n_samples = self.simulate_features(self.batch_size) _censored_features = deepcopy(_features) _outcomes = self.simulate_outcomes(_features) _censoring = np.full((_n_samples,), self.n_intervals, dtype="uint64") if self.censoring_prob: censored_idx = np.random.binomial(1, self.censoring_prob, size=_n_samples).astype("bool") _censoring[censored_idx] -= np.random.poisson( lam=self.censoring_scale, size=(censored_idx.sum(),)).astype("uint64") _censored_features = self._censor_array_list( _censored_features, _censoring) _outcomes = self._censor_array_list(_outcomes, _censoring) _features, _censored_features, _outcomes, censoring, _ = \ self._filter_non_positive_samples(_features, _censored_features, _outcomes, _censoring) return _features, _censored_features, _outcomes, _censoring, _n_samples def simulate_features(self, n_samples): """Simulates features, either `single_exposure` or `multiple_exposures` exposures. 
""" if self.exposure_type == "single_exposure": features, n_samples = self._sim_single_exposures() elif self.exposure_type == "multiple_exposures": sim = self._sim_multiple_exposures_exposures features = [sim() for _ in range(n_samples)] return features, n_samples # We just keep it for the tests now # TODO later: need to be improved with Hawkes def _sim_multiple_exposures_exposures(self): features = np.zeros((self.n_intervals, self.n_features)) while features.sum() == 0: # Make sure we do not generate empty feature matrix features = np.random.randint( 2, size=(self.n_intervals, self.n_features), ).astype("float64") if self.sparse: features = csr_matrix(features, dtype="float64") return features def _sim_single_exposures(self): if not self.sparse: raise ValueError( "'single_exposure' exposures can only be simulated" " as sparse feature matrices") if self.hawkes_exp_kernels is None: np.random.seed(self.seed) decays = .002 * np.ones((self.n_features, self.n_features)) baseline = 4 * np.random.random(self.n_features) / self.n_intervals mult = np.random.random(self.n_features) adjacency = mult * np.eye(self.n_features) if self.n_correlations: comb = list(permutations(range(self.n_features), 2)) if len(comb) > 1: idx = itemgetter(*np.random.choice( range(len(comb)), size=self.n_correlations, replace=False)) comb = idx(comb) for i, j in comb: adjacency[i, j] = np.random.random(1) self._set( 'hawkes_exp_kernels', SimuHawkesExpKernels(adjacency=adjacency, decays=decays, baseline=baseline, verbose=False, seed=self.seed)) self.hawkes_exp_kernels.adjust_spectral_radius( .1) # TODO later: allow to change this parameter hawkes = SimuHawkesMulti(self.hawkes_exp_kernels, n_simulations=self.n_cases) run_time = self.n_intervals hawkes.end_time = [1 * run_time for _ in range(self.n_cases)] dt = 1 self.hawkes_exp_kernels.track_intensity(dt) hawkes.simulate() self.hawkes_obj = hawkes features = [[ np.min(np.floor(f)) if len(f) > 0 else -1 for f in patient_events ] for patient_events in hawkes.timestamps] features = [ self.to_coo(feat, (run_time, self.n_features)) for feat in features ] # Make sure patients have at least one exposure? exposures_filter = itemgetter( *[i for i, f in enumerate(features) if f.sum() > 0]) features = exposures_filter(features) n_samples = len(features) return features, n_samples def simulate_outcomes(self, features): features, _, _ = LongitudinalFeaturesLagger(n_lags=self.n_lags). 
\ fit_transform(features) if self.distribution == "poisson": # TODO later: add self.max_n_events to allow for multiple outcomes # In this case, the multinomial simulator should use this arg too outcomes = self._simulate_poisson_outcomes(features, self._coeffs) else: outcomes = self._simulate_multinomial_outcomes( features, self._coeffs) return outcomes def _simulate_multinomial_outcomes(self, features, coeffs): baseline = np.zeros(self.n_intervals) if self.time_drift is not None: baseline = self.time_drift(np.arange(self.n_intervals)) dot_products = [baseline + feat.dot(coeffs) for feat in features] def sim(dot_prod): dot_prod -= dot_prod.max() probabilities = np.exp(dot_prod) / np.sum(np.exp(dot_prod)) outcomes = np.random.multinomial(1, probabilities) return outcomes.astype("int32") return [sim(dot_product) for dot_product in dot_products] def _simulate_poisson_outcomes(self, features, coeffs, first_tick_only=True): baseline = np.zeros(self.n_intervals) if self.time_drift is not None: baseline = self.time_drift(np.arange(self.n_intervals)) dot_products = [baseline + feat.dot(coeffs) for feat in features] def sim(dot_prod): dot_prod -= dot_prod.max() intensities = np.exp(dot_prod) ticks = np.random.poisson(lam=intensities) if first_tick_only: first_tick_idx = np.argmax(ticks > 0) y = np.zeros_like(intensities) if ticks.sum() > 0: y[first_tick_idx] = 1 else: y = ticks return y.astype("int32") return [sim(dot_product) for dot_product in dot_products] @staticmethod def _censor_array_list(array_list, censoring): """Apply censoring to a list of array-like objects. Works for 1-D or 2-D arrays, as long as the first axis represents the time. Parameters ---------- array_list : list of numpy.ndarray or list of scipy.sparse.csr_matrix, list of length n_cases, each element of the list of shape=(n_intervals, n_features) or shape=(n_intervals,) The list of features matrices. censoring : `numpy.ndarray`, shape=(n_cases, 1), dtype="uint64" The censoring data. This array should contain integers in [1, n_intervals]. If the value i is equal to n_intervals, then there is no censoring for sample i. If censoring = c < n_intervals, then the observation of sample i is stopped at interval c, that is, the row c - 1 of the corresponding matrix. The last n_intervals - c rows are then set to 0. Returns ------- output : `[numpy.ndarrays]` or `[csr_matrices]`, shape=(n_intervals, n_features) The list of censored features matrices. """ def censor(array, censoring_idx): if sps.issparse(array): array = array.tolil() array[int(censoring_idx):] = 0 array = array.tocsr() else: array[int(censoring_idx):] = 0 return array return [censor(l, censoring[i]) for i, l in enumerate(array_list)] @staticmethod def _filter_non_positive_samples(features, features_censored, labels, censoring): """Filter out samples which don't tick in the observation window. Parameters ---------- features : list of numpy.ndarray or list of scipy.sparse.csr_matrix, list of length n_cases, each element of the list of shape=(n_intervals, n_features) The list of features matrices. labels : list of numpy.ndarray of length n_cases, shape=(n_intervals,) The list of labels matrices. """ nnz = [np.nonzero(arr)[0] for arr in labels] positive_sample_idx = [i for i, arr in enumerate(nnz) if len(arr) > 0] if len(positive_sample_idx) == 0: raise ValueError("There should be at least one positive sample per\ batch. 
Try to increase batch_size.") pos_samples_filter = itemgetter(*positive_sample_idx) return list(pos_samples_filter(features)),\ list(pos_samples_filter(features_censored)),\ list(pos_samples_filter(labels)),\ censoring[positive_sample_idx],\ np.array(positive_sample_idx, dtype="uint64") @staticmethod def to_coo(feat, shape): feat = np.array(feat) cols = np.where(feat >= 0)[0] rows = np.array(feat[feat >= 0]) if len(cols) == 0: cols = np.random.randint(0, shape[1], 1) rows = np.random.randint(0, shape[0], 1) data = np.ones_like(cols) return csr_matrix((data, (rows, cols)), shape=shape, dtype="float64") @property def exposure_type(self): return self._exposure_type @exposure_type.setter def exposure_type(self, value): if value not in ["single_exposure", "multiple_exposures"]: raise ValueError("exposure_type can be only 'single_exposure' or " "'multiple_exposures'.") self._set("_exposure_type", value) @property def distribution(self): return self._distribution @distribution.setter def distribution(self, value): if value not in ["multinomial", "poisson"]: raise ValueError("distribution can be only 'multinomial' or " "'poisson'.") self._set("_distribution", value) @property def censoring_prob(self): return self._censoring_prob @censoring_prob.setter def censoring_prob(self, value): if value < 0 or value > 1: raise ValueError("censoring_prob value should be in [0, 1].") self._set("_censoring_prob", value) @property def censoring_scale(self): return self._censoring_scale @censoring_scale.setter def censoring_scale(self, value): if value < 0: raise ValueError("censoring_scale should be greater than 0.") self._set("_censoring_scale", value) @property def n_lags(self): return self._n_lags @n_lags.setter def n_lags(self, value): offsets = [0] for l in value: if l < 0: raise ValueError('n_lags elements should be greater than or ' 'equal to 0.') offsets.append(offsets[-1] + l + 1) self._set('_n_lags', value) self._set('_features_offset', offsets) @property def coeffs(self): value = list() for i, l in enumerate(self.n_lags): start = int(self._features_offset[i]) end = int(start + l + 1) value.append(self._coeffs[start:end]) return value @coeffs.setter def coeffs(self, value): if value is not None: for i, c in enumerate(value): if c.shape[0] != int(self.n_lags[i] + 1): raise ValueError("Coeffs %i th element should be of shape\ (n_lags[%i] + 1),)" % (i, self.n_lags[i])) value = np.hstack(value) self._set("_coeffs", value) @property def batch_size(self): return self._batch_size @batch_size.setter def batch_size(self, value): if value is None and self.distribution == "multinomial": self._set("_batch_size", self.n_cases) elif value is None: self._set("_batch_size", int(min(2000, self.n_cases))) else: self._set("_batch_size", int(value)) self._set("_batch_size", max(100, self.batch_size)) class CustomEffects: def __init__(self, n_intervals): """Class provinding flexible relative incidence curves to be used as coefficients in the `SimuSCCS` class. Parameters ---------- n_intervals : `int` Number of time intervals used to generate features and outcomes. """ self.n_intervals = n_intervals self._curves_type_dict = { 1: (5, 1), 2: (2, 2), 3: (.5, .5), 4: (2, 5), 5: (1, 3) } def constant_effect(self, amplitude, cut=0): """Returns coefficients corresponding to a constant relative incidence of value equal to `amplitude`. 
If `cut` is greater than 0, the relative incidence will be null on [`cut`, `n_intervals`] """ risk_curve = np.ones(self.n_intervals) * amplitude if cut > 0: risk_curve[cut:] = 1 return risk_curve def bell_shaped_effect(self, amplitude, width, lag=0, cut=0): """Returns coefficients corresponding to a bell shaped relative incidence of max value equal to `amplitude`. If `cut` is greater than 0, the relative incidence will be null on [`cut`, `n_intervals`]. The effect starts at `lag` interval, and lasts `width` intervals. """ self._check_params(lag, width, amplitude, cut) if width % 2 == 0: width += 1 effect = norm(0, width / 5).pdf(np.arange(width) - int(width / 2)) return self._create_risk_curve(effect, amplitude, cut, width, lag) def increasing_effect(self, amplitude, lag=0, cut=0, curvature_type=1): """Returns coefficients corresponding to an increasing relative incidence of max value equal to `amplitude`. If `cut` is greater than 0, the relative incidence will be null on [`cut`, `n_intervals`]. The effect starts at `lag` interval, and lasts `width` intervals. The parameter `curvature_type` controls the shape of the relative incidence curve, it can take values in {1, 2, 3, 4, 5}. """ width = self.n_intervals self._check_params(lag, width, amplitude, cut) if curvature_type not in np.arange(5) + 1: raise ValueError('curvature type should be in {1, 2, 3, 4, 5}') a, b = self._curves_type_dict[curvature_type] effect = beta(a, b).cdf(np.arange(width) / width) return self._create_risk_curve(effect, amplitude, cut, width, lag) def _check_params(self, lag, width, amplitude, cut): if cut is not None and cut >= width: raise ValueError('cut should be < width') if lag > self.n_intervals: raise ValueError('n_intervals should be > lag') if amplitude <= 0: raise ValueError('amplitude should be > 0') def _create_risk_curve(self, effect, amplitude, cut, width, lag): if cut: effect = effect[:int(width - cut)] end_effect = int(lag + width - cut) if end_effect > self.n_intervals: end_effect = self.n_intervals effect = effect[:end_effect - lag] M = effect.max() m = effect.min() effect = (effect - m) / (M - m) effect *= (amplitude - 1) risk_curve = np.ones(self.n_intervals) risk_curve[lag:end_effect] += effect return risk_curve @staticmethod def negative_effect(positive_effect): return np.exp(-np.log(positive_effect))
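A hypothetical smoke test of the bell-shaped curve logic above, assuming CustomEffects and its numpy/scipy.stats imports are already in scope as in the module; the interval counts and parameter values are illustrative only:

import numpy as np

# width=11 is odd, so it is used as-is; lag=5 shifts the bump to intervals [5, 16)
effects = CustomEffects(n_intervals=50)
curve = effects.bell_shaped_effect(amplitude=2.0, width=11, lag=5)
assert curve.shape == (50,)
assert np.isclose(curve.max(), 2.0)  # peak equals the requested amplitude
assert np.isclose(curve[0], 1.0)     # relative incidence is 1 (no effect) before the lag

This follows directly from _create_risk_curve: the effect is min-max normalized, scaled by amplitude - 1, then added onto a baseline of ones.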
bsd-3-clause
spreeker/democracygame
external_apps/docutils-snapshot/test/test_parsers/test_rst/test_directives/test_decorations.py
19
1847
#! /usr/bin/env python # $Id: test_decorations.py 4667 2006-07-12 21:40:56Z wiemann $ # Author: David Goodger <[email protected]> # Copyright: This module has been placed in the public domain. """ Tests for the "header" & "footer" directives. """ from __init__ import DocutilsTestSupport def suite(): s = DocutilsTestSupport.ParserTestSuite() s.generateTests(totest) return s totest = {} totest['headers'] = [ ["""\ .. header:: a paragraph for the header """, """\ <document source="test data"> <decoration> <header> <paragraph> a paragraph for the header """], ["""\ .. header:: """, """\ <document source="test data"> <system_message level="3" line="1" source="test data" type="ERROR"> <paragraph> Content block expected for the "header" directive; none found. <literal_block xml:space="preserve"> .. header:: """], ["""\ .. header:: first part of the header .. header:: second part of the header """, """\ <document source="test data"> <decoration> <header> <paragraph> first part of the header <paragraph> second part of the header """], ] totest['footers'] = [ ["""\ .. footer:: a paragraph for the footer """, """\ <document source="test data"> <decoration> <footer> <paragraph> a paragraph for the footer """], ["""\ .. footer:: even if a footer is declared first .. header:: the header appears first """, """\ <document source="test data"> <decoration> <header> <paragraph> the header appears first <footer> <paragraph> even if a footer is declared first """], ] if __name__ == '__main__': import unittest unittest.main(defaultTest='suite')
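For context, a sketch of exercising one of the cases above directly against docutils, outside the test harness (assuming docutils is installed; the source text is taken from the first test case):

from docutils.core import publish_string

source = """\
.. header:: a paragraph for the header
"""
# the pseudoxml writer prints a tree in the same notation as the expected
# outputs embedded in the totest table above
print(publish_string(source, writer_name='pseudoxml').decode('utf-8'))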
bsd-3-clause
pxzhenren/flask
tests/test_views.py
155
4202
# -*- coding: utf-8 -*- """ tests.views ~~~~~~~~~~~ Pluggable views. :copyright: (c) 2015 by Armin Ronacher. :license: BSD, see LICENSE for more details. """ import pytest import flask import flask.views from werkzeug.http import parse_set_header def common_test(app): c = app.test_client() assert c.get('/').data == b'GET' assert c.post('/').data == b'POST' assert c.put('/').status_code == 405 meths = parse_set_header(c.open('/', method='OPTIONS').headers['Allow']) assert sorted(meths) == ['GET', 'HEAD', 'OPTIONS', 'POST'] def test_basic_view(): app = flask.Flask(__name__) class Index(flask.views.View): methods = ['GET', 'POST'] def dispatch_request(self): return flask.request.method app.add_url_rule('/', view_func=Index.as_view('index')) common_test(app) def test_method_based_view(): app = flask.Flask(__name__) class Index(flask.views.MethodView): def get(self): return 'GET' def post(self): return 'POST' app.add_url_rule('/', view_func=Index.as_view('index')) common_test(app) def test_view_patching(): app = flask.Flask(__name__) class Index(flask.views.MethodView): def get(self): 1 // 0 def post(self): 1 // 0 class Other(Index): def get(self): return 'GET' def post(self): return 'POST' view = Index.as_view('index') view.view_class = Other app.add_url_rule('/', view_func=view) common_test(app) def test_view_inheritance(): app = flask.Flask(__name__) class Index(flask.views.MethodView): def get(self): return 'GET' def post(self): return 'POST' class BetterIndex(Index): def delete(self): return 'DELETE' app.add_url_rule('/', view_func=BetterIndex.as_view('index')) c = app.test_client() meths = parse_set_header(c.open('/', method='OPTIONS').headers['Allow']) assert sorted(meths) == ['DELETE', 'GET', 'HEAD', 'OPTIONS', 'POST'] def test_view_decorators(): app = flask.Flask(__name__) def add_x_parachute(f): def new_function(*args, **kwargs): resp = flask.make_response(f(*args, **kwargs)) resp.headers['X-Parachute'] = 'awesome' return resp return new_function class Index(flask.views.View): decorators = [add_x_parachute] def dispatch_request(self): return 'Awesome' app.add_url_rule('/', view_func=Index.as_view('index')) c = app.test_client() rv = c.get('/') assert rv.headers['X-Parachute'] == 'awesome' assert rv.data == b'Awesome' def test_implicit_head(): app = flask.Flask(__name__) class Index(flask.views.MethodView): def get(self): return flask.Response('Blub', headers={ 'X-Method': flask.request.method }) app.add_url_rule('/', view_func=Index.as_view('index')) c = app.test_client() rv = c.get('/') assert rv.data == b'Blub' assert rv.headers['X-Method'] == 'GET' rv = c.head('/') assert rv.data == b'' assert rv.headers['X-Method'] == 'HEAD' def test_explicit_head(): app = flask.Flask(__name__) class Index(flask.views.MethodView): def get(self): return 'GET' def head(self): return flask.Response('', headers={'X-Method': 'HEAD'}) app.add_url_rule('/', view_func=Index.as_view('index')) c = app.test_client() rv = c.get('/') assert rv.data == b'GET' rv = c.head('/') assert rv.data == b'' assert rv.headers['X-Method'] == 'HEAD' def test_endpoint_override(): app = flask.Flask(__name__) app.debug = True class Index(flask.views.View): methods = ['GET', 'POST'] def dispatch_request(self): return flask.request.method app.add_url_rule('/', view_func=Index.as_view('index')) with pytest.raises(AssertionError): app.add_url_rule('/', view_func=Index.as_view('index')) # But these tests should still pass. We just log a warning. common_test(app)
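A minimal standalone sketch of the MethodView pattern these tests exercise, assuming Flask is installed; the route, class name, and counter behavior are illustrative, not part of the test suite:

import flask
import flask.views

app = flask.Flask(__name__)

class Counter(flask.views.MethodView):
    # hypothetical view: GET reads the value, POST increments it
    value = 0

    def get(self):
        return str(Counter.value)

    def post(self):
        Counter.value += 1
        return str(Counter.value)

app.add_url_rule('/counter', view_func=Counter.as_view('counter'))

with app.test_client() as c:
    assert c.get('/counter').data == b'0'
    assert c.post('/counter').data == b'1'

As in test_implicit_head above, HEAD and OPTIONS come for free: MethodView derives them from the defined get method.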
bsd-3-clause
Frulko/AutobahnPython
examples/wamp/rpc/keyvalue/client.py
12
1841
############################################################################### ## ## Copyright 2011 Tavendo GmbH ## ## Licensed under the Apache License, Version 2.0 (the "License"); ## you may not use this file except in compliance with the License. ## You may obtain a copy of the License at ## ## http://www.apache.org/licenses/LICENSE-2.0 ## ## Unless required by applicable law or agreed to in writing, software ## distributed under the License is distributed on an "AS IS" BASIS, ## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ## See the License for the specific language governing permissions and ## limitations under the License. ## ############################################################################### import sys from twisted.python import log from twisted.internet import reactor from twisted.internet.defer import Deferred, DeferredList from autobahn.websocket import connectWS from autobahn.wamp import WampClientFactory, WampClientProtocol class KeyValueClientProtocol(WampClientProtocol): def done(self, *args): self.sendClose() reactor.stop() def show(self, key, value): print key, value def get(self, keys): defs = [] for key in keys: d = self.call("keyvalue:get", key).addCallback(lambda value, key = key: self.show(key, value)) defs.append(d) return DeferredList(defs) def onSessionOpen(self): self.prefix("keyvalue", "http://example.com/simple/keyvalue#") self.call("keyvalue:keys").addCallbacks(self.get).addCallback(self.done) if __name__ == '__main__': log.startLogging(sys.stdout) factory = WampClientFactory("ws://localhost:9000") factory.protocol = KeyValueClientProtocol connectWS(factory) reactor.run()
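The client above leans on standard Twisted Deferred composition; a self-contained sketch of the same fan-out/collect pattern with no WAMP server required (the fetch stub and key/value data are illustrative):

from twisted.internet.defer import DeferredList, succeed

def fetch(key):
    # stand-in for the client's self.call("keyvalue:get", key) round trip
    return succeed({"foo": 1, "bar": 2}.get(key))

def get_all(keys):
    # key=k pins the loop variable, mirroring the `key = key` default-argument
    # trick used in KeyValueClientProtocol.get above
    defs = [fetch(k).addCallback(lambda v, k=k: print(k, v)) for k in keys]
    return DeferredList(defs)

get_all(["foo", "bar"]).addCallback(lambda _: print("done"))

Because succeed returns an already-fired Deferred, this sketch runs to completion without starting a reactor.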
apache-2.0
Godley/MuseParse
MuseParse/tests/testHandlers/testHandlePartsAndPiece.py
1
8150
import unittest from MuseParse.classes.Input import MxmlParser from MuseParse.classes.ObjectHierarchy.TreeClasses.PieceTree import PieceTree class testSetupPiece(unittest.TestCase): def setUp(self): self.handler = MxmlParser.SetupPiece self.tags = [] self.attrs = {} self.chars = {} self.data = {} self.piece = PieceTree() def testNoTags(self): self.assertEqual( None, self.handler( self.tags, self.attrs, self.chars, self.piece, self.data), "ERROR: testNoTags failed: nothing should happen if there are no tags in list") def testMetaExists(self): self.assertFalse( hasattr( self.piece.GetItem(), "meta"), "ERROR: testMetaExists failed: meta should not be set in piece class at beginning of testing") def testIrrelevantTag(self): self.tags.append("lol") self.assertEqual( None, self.handler( self.tags, self.attrs, self.chars, self.piece, self.data), "ERROR: irrelevant tag should do nothing in TestIrrelevance") def testTitleTag(self): self.tags.append("movement-title") self.chars["movement-title"] = "hehehe" self.handler(self.tags, self.attrs, self.chars, self.piece, self.data) self.assertTrue(hasattr(self.piece.GetItem(), "meta"), "ERROR: Meta should exist in TestTitleTag") self.assertEqual( "hehehe", self.piece.GetItem().meta.title, "ERROR: title set incorrectly in TestTitleTag") def testRightsTag(self): self.tags.append("rights") self.chars["rights"] = "lee" self.handler(self.tags, self.attrs, self.chars, self.piece, self.data) self.assertTrue(hasattr(self.piece.GetItem(), "meta")) self.assertEqual("lee ", self.piece.GetItem().meta.copyright) def testCompTag(self): self.tags.append("creator") self.attrs["creator"] = {"type": "composer"} self.chars["creator"] = "lee" self.handler(self.tags, self.attrs, self.chars, self.piece, self.data) self.assertTrue( hasattr( self.piece.GetItem(), "meta"), "ERROR: meta should exist in piece class in TestCompTag") self.assertEqual( "lee", self.piece.GetItem().meta.composer, "ERROR: composer should match expected in TestCompTag") def testTitleCompTag(self): self.tags.append("creator") self.attrs["creator"] = {"type": "composer"} self.chars["creator"] = "lee" self.chars["movement-title"] = "hello world" self.handler(self.tags, self.attrs, self.chars, self.piece, self.data) self.assertTrue( hasattr( self.piece.GetItem().meta, "composer"), "ERROR: meta should have composer attrib in TestTitleCompTag") self.assertEqual( "lee", self.piece.GetItem().meta.composer, "ERROR: composer should match test in TestTitleCompTag") self.tags.append("movement-title") self.handler(self.tags, self.attrs, self.chars, self.piece, self.data) self.assertTrue( hasattr( self.piece.GetItem().meta, "title"), "ERROR: meta should have title in TestTitleCompTag") self.assertEqual( "hello world", self.piece.GetItem().meta.title, "ERROR: meta title set incorrectly in TestTitleCompTag") class testHandlePart(unittest.TestCase): def setUp(self): self.handler = MxmlParser.UpdatePart self.tags = [] self.chars = {} self.attrs = {} self.piece = PieceTree() self.data = {} def testNoData(self): self.assertEqual( None, self.handler( self.tags, self.attrs, self.chars, self.piece, self.data), "ERROR: no tags should return none in TestNodata") def testIrrelevantTag(self): self.tags.append("wut") MxmlParser.part_id = None self.assertEqual( None, self.handler( self.tags, self.attrs, self.chars, self.piece, self.data), "ERROR: irrelevant tags should return none in TestIrrelevantTag") def testScorePartTag(self): MxmlParser.part_id = None self.assertEqual( None, MxmlParser.part_id, "ERROR: part_id not none in 
testScorePartTag") self.tags.append("score-part") self.attrs["score-part"] = {"id": "P1"} self.handler(self.tags, self.attrs, self.chars, self.piece, self.data) self.assertEqual(1, len(self.piece.root.GetChildrenIndexes())) def testPnameTag(self): self.assertEqual(0, len(self.piece.root.GetChildrenIndexes())) self.tags.append("score-part") self.attrs["score-part"] = {"id": "P1"} self.tags.append("part-name") self.chars["part-name"] = "will" self.handler(self.tags, self.attrs, self.chars, self.piece, self.data) self.assertEqual("will", self.piece.getPart("P1").GetItem().name) def testPNameWithShortName(self): self.assertEqual(0, len(self.piece.root.GetChildrenIndexes())) self.tags.append("score-part") self.attrs["score-part"] = {"id": "P1"} self.tags.append("part-abbreviation") self.chars["part-abbreviation"] = "w" self.handler(self.tags, self.attrs, self.chars, self.piece, self.data) self.assertEqual("w", self.piece.getPart("P1").GetItem().shortname) def testPartGroupOpen(self): self.tags.append("part-group") self.attrs["part-group"] = {"number": "1", "type": "start"} self.tags.append("score-part") self.attrs["score-part"] = {"id": "P1"} self.handler(self.tags, self.attrs, self.chars, self.piece, self.data) self.tags.append("score-part") self.attrs["score-part"] = {"id": "P2"} self.handler(self.tags, self.attrs, self.chars, self.piece, self.data) self.assertEqual(["P1", "P2"], self.piece.getGroup(1)) def testPartGroupClose(self): self.tags.append("part-group") self.attrs["part-group"] = {"number": "1", "type": "start"} self.tags.append("score-part") self.attrs["score-part"] = {"id": "P1"} self.handler(self.tags, self.attrs, self.chars, self.piece, self.data) self.tags.append("part-group") self.attrs["part-group"] = {"number": "1", "type": "stop"} self.handler(self.tags, self.attrs, self.chars, self.piece, self.data) self.tags.append("score-part") self.attrs["score-part"] = {"id": "P2"} self.handler(self.tags, self.attrs, self.chars, self.piece, self.data) self.assertEqual(["P1"], self.piece.getGroup(1)) class testRights(unittest.TestCase): def setUp(self): testSetupPiece.setUp(self) self.data = {} self.tags.append("credit") self.tags.append("credit-type") self.chars["credit-type"] = "rights" self.handler(self.tags, self.attrs, self.chars, self.piece, self.data) def testRightsCredit(self): self.tags.append("credit-words") self.chars["credit-words"] = "copyright lol" self.handler(self.tags, self.attrs, self.chars, self.piece, self.data) self.assertTrue(hasattr(self.piece.GetItem().meta, "copyright")) def testRightsValue(self): self.tags.append("credit-words") self.chars["credit-words"] = "copyright lol" self.handler(self.tags, self.attrs, self.chars, self.piece, self.data) self.assertEqual(self.piece.GetItem().meta.copyright, "copyright lol")
mit
areski/django
tests/inspectdb/models.py
208
2737
# -*- encoding: utf-8 -*- from __future__ import unicode_literals from django.db import models class People(models.Model): name = models.CharField(max_length=255) parent = models.ForeignKey('self', models.CASCADE) class Message(models.Model): from_field = models.ForeignKey(People, models.CASCADE, db_column='from_id') class PeopleData(models.Model): people_pk = models.ForeignKey(People, models.CASCADE, primary_key=True) ssn = models.CharField(max_length=11) class PeopleMoreData(models.Model): people_unique = models.ForeignKey(People, models.CASCADE, unique=True) license = models.CharField(max_length=255) class DigitsInColumnName(models.Model): all_digits = models.CharField(max_length=11, db_column='123') leading_digit = models.CharField(max_length=11, db_column='4extra') leading_digits = models.CharField(max_length=11, db_column='45extra') class SpecialName(models.Model): field = models.IntegerField(db_column='field') # Underscores field_field_0 = models.IntegerField(db_column='Field_') field_field_1 = models.IntegerField(db_column='Field__') field_field_2 = models.IntegerField(db_column='__field') # Other chars prc_x = models.IntegerField(db_column='prc(%) x') non_ascii = models.IntegerField(db_column='tamaño') class Meta: db_table = "inspectdb_special.table name" class ColumnTypes(models.Model): id = models.AutoField(primary_key=True) big_int_field = models.BigIntegerField() bool_field = models.BooleanField(default=False) null_bool_field = models.NullBooleanField() char_field = models.CharField(max_length=10) null_char_field = models.CharField(max_length=10, blank=True, null=True) comma_separated_int_field = models.CommaSeparatedIntegerField(max_length=99) date_field = models.DateField() date_time_field = models.DateTimeField() decimal_field = models.DecimalField(max_digits=6, decimal_places=1) email_field = models.EmailField() file_field = models.FileField(upload_to="unused") file_path_field = models.FilePathField() float_field = models.FloatField() int_field = models.IntegerField() gen_ip_adress_field = models.GenericIPAddressField(protocol="ipv4") pos_int_field = models.PositiveIntegerField() pos_small_int_field = models.PositiveSmallIntegerField() slug_field = models.SlugField() small_int_field = models.SmallIntegerField() text_field = models.TextField() time_field = models.TimeField() url_field = models.URLField() class UniqueTogether(models.Model): field1 = models.IntegerField() field2 = models.CharField(max_length=10) class Meta: unique_together = ('field1', 'field2')
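These models exist to be round-tripped through Django's inspectdb management command; a sketch of invoking it programmatically (assumes a configured Django project with these tables migrated; capturing stdout in a buffer is illustrative):

import io
from django.core.management import call_command

buf = io.StringIO()
# introspect the default database; for the schema above the output should
# reproduce equivalent field definitions, including the db_column overrides
call_command('inspectdb', stdout=buf)
print(buf.getvalue())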
bsd-3-clause
jorik041/phantomjs
src/qt/qtbase/util/local_database/cldr2qlocalexml.py
102
42691
#!/usr/bin/env python ############################################################################# ## ## Copyright (C) 2013 Digia Plc and/or its subsidiary(-ies). ## Contact: http://www.qt-project.org/legal ## ## This file is part of the test suite of the Qt Toolkit. ## ## $QT_BEGIN_LICENSE:LGPL$ ## Commercial License Usage ## Licensees holding valid commercial Qt licenses may use this file in ## accordance with the commercial license agreement provided with the ## Software or, alternatively, in accordance with the terms contained in ## a written agreement between you and Digia. For licensing terms and ## conditions see http://qt.digia.com/licensing. For further information ## use the contact form at http://qt.digia.com/contact-us. ## ## GNU Lesser General Public License Usage ## Alternatively, this file may be used under the terms of the GNU Lesser ## General Public License version 2.1 as published by the Free Software ## Foundation and appearing in the file LICENSE.LGPL included in the ## packaging of this file. Please review the following information to ## ensure the GNU Lesser General Public License version 2.1 requirements ## will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html. ## ## In addition, as a special exception, Digia gives you certain additional ## rights. These rights are described in the Digia Qt LGPL Exception ## version 1.1, included in the file LGPL_EXCEPTION.txt in this package. ## ## GNU General Public License Usage ## Alternatively, this file may be used under the terms of the GNU ## General Public License version 3.0 as published by the Free Software ## Foundation and appearing in the file LICENSE.GPL included in the ## packaging of this file. Please review the following information to ## ensure the GNU General Public License version 3.0 requirements will be ## met: http://www.gnu.org/copyleft/gpl.html. ## ## ## $QT_END_LICENSE$ ## ############################################################################# import os import sys import enumdata import xpathlite from xpathlite import DraftResolution from dateconverter import convert_date from xml.sax.saxutils import escape, unescape import re findAlias = xpathlite.findAlias findEntry = xpathlite.findEntry findEntryInFile = xpathlite._findEntryInFile findTagsInFile = xpathlite.findTagsInFile def parse_number_format(patterns, data): # this is a very limited parsing of the number format for currency only. def skip_repeating_pattern(x): p = x.replace('0', '#').replace(',', '').replace('.', '') seen = False result = '' for c in p: if c == '#': if seen: continue seen = True else: seen = False result = result + c return result patterns = patterns.split(';') result = [] for pattern in patterns: pattern = skip_repeating_pattern(pattern) pattern = pattern.replace('#', "%1") # according to http://www.unicode.org/reports/tr35/#Number_Format_Patterns # there can be doubled or trippled currency sign, however none of the # locales use that. pattern = pattern.replace(u'\xa4', "%2") pattern = pattern.replace("''", "###").replace("'", '').replace("###", "'") pattern = pattern.replace('-', data['minus']) pattern = pattern.replace('+', data['plus']) result.append(pattern) return result def parse_list_pattern_part_format(pattern): # this is a very limited parsing of the format for list pattern part only. 
result = "" result = pattern.replace("{0}", "%1") result = result.replace("{1}", "%2") result = result.replace("{2}", "%3") return result def ordStr(c): if len(c) == 1: return str(ord(c)) raise xpathlite.Error("Unable to handle value \"%s\"" % addEscapes(c)) return "##########" # the following functions are supposed to fix the problem with QLocale # returning a character instead of strings for QLocale::exponential() # and others. So we fallback to default values in these cases. def fixOrdStrMinus(c): if len(c) == 1: return str(ord(c)) return str(ord('-')) def fixOrdStrPlus(c): if len(c) == 1: return str(ord(c)) return str(ord('+')) def fixOrdStrExp(c): if len(c) == 1: return str(ord(c)) return str(ord('e')) def fixOrdStrPercent(c): if len(c) == 1: return str(ord(c)) return str(ord('%')) def fixOrdStrList(c): if len(c) == 1: return str(ord(c)) return str(ord(';')) def generateLocaleInfo(path): (dir_name, file_name) = os.path.split(path) if not path.endswith(".xml"): return {} # skip legacy/compatibility ones alias = findAlias(path) if alias: raise xpathlite.Error("alias to \"%s\"" % alias) language_code = findEntryInFile(path, "identity/language", attribute="type")[0] if language_code == 'root': # just skip it return {} country_code = findEntryInFile(path, "identity/territory", attribute="type")[0] script_code = findEntryInFile(path, "identity/script", attribute="type")[0] variant_code = findEntryInFile(path, "identity/variant", attribute="type")[0] # we do not support variants # ### actually there is only one locale with variant: en_US_POSIX # does anybody care about it at all? if variant_code: raise xpathlite.Error("we do not support variants (\"%s\")" % variant_code) language_id = enumdata.languageCodeToId(language_code) if language_id <= 0: raise xpathlite.Error("unknown language code \"%s\"" % language_code) language = enumdata.language_list[language_id][0] script_id = enumdata.scriptCodeToId(script_code) if script_id == -1: raise xpathlite.Error("unknown script code \"%s\"" % script_code) script = enumdata.script_list[script_id][0] # we should handle fully qualified names with the territory if not country_code: return {} country_id = enumdata.countryCodeToId(country_code) if country_id <= 0: raise xpathlite.Error("unknown country code \"%s\"" % country_code) country = enumdata.country_list[country_id][0] # So we say we accept only those values that have "contributed" or # "approved" resolution. see http://www.unicode.org/cldr/process.html # But we only respect the resolution for new datas for backward # compatibility. 
draft = DraftResolution.contributed result = {} result['language'] = language result['script'] = script result['country'] = country result['language_code'] = language_code result['country_code'] = country_code result['script_code'] = script_code result['variant_code'] = variant_code result['language_id'] = language_id result['script_id'] = script_id result['country_id'] = country_id supplementalPath = dir_name + "/../supplemental/supplementalData.xml" currencies = findTagsInFile(supplementalPath, "currencyData/region[iso3166=%s]"%country_code); result['currencyIsoCode'] = '' result['currencyDigits'] = 2 result['currencyRounding'] = 1 if currencies: for e in currencies: if e[0] == 'currency': tender = True t = filter(lambda x: x[0] == 'tender', e[1]) if t and t[0][1] == 'false': tender = False; if tender and not filter(lambda x: x[0] == 'to', e[1]): result['currencyIsoCode'] = filter(lambda x: x[0] == 'iso4217', e[1])[0][1] break if result['currencyIsoCode']: t = findTagsInFile(supplementalPath, "currencyData/fractions/info[iso4217=%s]"%result['currencyIsoCode']); if t and t[0][0] == 'info': result['currencyDigits'] = int(filter(lambda x: x[0] == 'digits', t[0][1])[0][1]) result['currencyRounding'] = int(filter(lambda x: x[0] == 'rounding', t[0][1])[0][1]) numbering_system = None try: numbering_system = findEntry(path, "numbers/defaultNumberingSystem") except: pass def findEntryDef(path, xpath, value=''): try: return findEntry(path, xpath) except xpathlite.Error: return value def get_number_in_system(path, xpath, numbering_system): if numbering_system: try: return findEntry(path, xpath + "[numberSystem=" + numbering_system + "]") except xpathlite.Error: # in CLDR 1.9 number system was refactored for numbers (but not for currency) # so if previous findEntry doesn't work we should try this: try: return findEntry(path, xpath.replace("/symbols/", "/symbols[numberSystem=" + numbering_system + "]/")) except xpathlite.Error: # fallback to default pass return findEntry(path, xpath) result['decimal'] = get_number_in_system(path, "numbers/symbols/decimal", numbering_system) result['group'] = get_number_in_system(path, "numbers/symbols/group", numbering_system) result['list'] = get_number_in_system(path, "numbers/symbols/list", numbering_system) result['percent'] = get_number_in_system(path, "numbers/symbols/percentSign", numbering_system) try: numbering_systems = {} for ns in findTagsInFile(cldr_dir + "/../supplemental/numberingSystems.xml", "numberingSystems"): tmp = {} id = "" for data in ns[1:][0]: # ns looks like this: [u'numberingSystem', [(u'digits', u'0123456789'), (u'type', u'numeric'), (u'id', u'latn')]] tmp[data[0]] = data[1] if data[0] == u"id": id = data[1] numbering_systems[id] = tmp result['zero'] = numbering_systems[numbering_system][u"digits"][0] except e: sys.stderr.write("Native zero detection problem:\n" + str(e) + "\n") result['zero'] = get_number_in_system(path, "numbers/symbols/nativeZeroDigit", numbering_system) result['minus'] = get_number_in_system(path, "numbers/symbols/minusSign", numbering_system) result['plus'] = get_number_in_system(path, "numbers/symbols/plusSign", numbering_system) result['exp'] = get_number_in_system(path, "numbers/symbols/exponential", numbering_system).lower() result['quotationStart'] = findEntry(path, "delimiters/quotationStart") result['quotationEnd'] = findEntry(path, "delimiters/quotationEnd") result['alternateQuotationStart'] = findEntry(path, "delimiters/alternateQuotationStart") result['alternateQuotationEnd'] = findEntry(path, 
"delimiters/alternateQuotationEnd") result['listPatternPartStart'] = parse_list_pattern_part_format(findEntry(path, "listPatterns/listPattern/listPatternPart[start]")) result['listPatternPartMiddle'] = parse_list_pattern_part_format(findEntry(path, "listPatterns/listPattern/listPatternPart[middle]")) result['listPatternPartEnd'] = parse_list_pattern_part_format(findEntry(path, "listPatterns/listPattern/listPatternPart[end]")) result['listPatternPartTwo'] = parse_list_pattern_part_format(findEntry(path, "listPatterns/listPattern/listPatternPart[2]")) result['am'] = findEntry(path, "dates/calendars/calendar[gregorian]/dayPeriods/dayPeriodContext[format]/dayPeriodWidth[wide]/dayPeriod[am]", draft) result['pm'] = findEntry(path, "dates/calendars/calendar[gregorian]/dayPeriods/dayPeriodContext[format]/dayPeriodWidth[wide]/dayPeriod[pm]", draft) result['longDateFormat'] = convert_date(findEntry(path, "dates/calendars/calendar[gregorian]/dateFormats/dateFormatLength[full]/dateFormat/pattern")) result['shortDateFormat'] = convert_date(findEntry(path, "dates/calendars/calendar[gregorian]/dateFormats/dateFormatLength[short]/dateFormat/pattern")) result['longTimeFormat'] = convert_date(findEntry(path, "dates/calendars/calendar[gregorian]/timeFormats/timeFormatLength[full]/timeFormat/pattern")) result['shortTimeFormat'] = convert_date(findEntry(path, "dates/calendars/calendar[gregorian]/timeFormats/timeFormatLength[short]/timeFormat/pattern")) endonym = None if country_code and script_code: endonym = findEntryDef(path, "localeDisplayNames/languages/language[type=%s_%s_%s]" % (language_code, script_code, country_code)) if not endonym and script_code: endonym = findEntryDef(path, "localeDisplayNames/languages/language[type=%s_%s]" % (language_code, script_code)) if not endonym and country_code: endonym = findEntryDef(path, "localeDisplayNames/languages/language[type=%s_%s]" % (language_code, country_code)) if not endonym: endonym = findEntryDef(path, "localeDisplayNames/languages/language[type=%s]" % (language_code)) result['language_endonym'] = endonym result['country_endonym'] = findEntryDef(path, "localeDisplayNames/territories/territory[type=%s]" % (country_code)) currency_format = get_number_in_system(path, "numbers/currencyFormats/currencyFormatLength/currencyFormat/pattern", numbering_system) currency_format = parse_number_format(currency_format, result) result['currencyFormat'] = currency_format[0] result['currencyNegativeFormat'] = '' if len(currency_format) > 1: result['currencyNegativeFormat'] = currency_format[1] result['currencySymbol'] = '' result['currencyDisplayName'] = '' if result['currencyIsoCode']: result['currencySymbol'] = findEntryDef(path, "numbers/currencies/currency[%s]/symbol" % result['currencyIsoCode']) display_name_path = "numbers/currencies/currency[%s]/displayName" % result['currencyIsoCode'] result['currencyDisplayName'] \ = findEntryDef(path, display_name_path) + ";" \ + findEntryDef(path, display_name_path + "[count=zero]") + ";" \ + findEntryDef(path, display_name_path + "[count=one]") + ";" \ + findEntryDef(path, display_name_path + "[count=two]") + ";" \ + findEntryDef(path, display_name_path + "[count=few]") + ";" \ + findEntryDef(path, display_name_path + "[count=many]") + ";" \ + findEntryDef(path, display_name_path + "[count=other]") + ";" standalone_long_month_path = "dates/calendars/calendar[gregorian]/months/monthContext[stand-alone]/monthWidth[wide]/month" result['standaloneLongMonths'] \ = findEntry(path, standalone_long_month_path + "[1]") + ";" \ + 
findEntry(path, standalone_long_month_path + "[2]") + ";" \ + findEntry(path, standalone_long_month_path + "[3]") + ";" \ + findEntry(path, standalone_long_month_path + "[4]") + ";" \ + findEntry(path, standalone_long_month_path + "[5]") + ";" \ + findEntry(path, standalone_long_month_path + "[6]") + ";" \ + findEntry(path, standalone_long_month_path + "[7]") + ";" \ + findEntry(path, standalone_long_month_path + "[8]") + ";" \ + findEntry(path, standalone_long_month_path + "[9]") + ";" \ + findEntry(path, standalone_long_month_path + "[10]") + ";" \ + findEntry(path, standalone_long_month_path + "[11]") + ";" \ + findEntry(path, standalone_long_month_path + "[12]") + ";" standalone_short_month_path = "dates/calendars/calendar[gregorian]/months/monthContext[stand-alone]/monthWidth[abbreviated]/month" result['standaloneShortMonths'] \ = findEntry(path, standalone_short_month_path + "[1]") + ";" \ + findEntry(path, standalone_short_month_path + "[2]") + ";" \ + findEntry(path, standalone_short_month_path + "[3]") + ";" \ + findEntry(path, standalone_short_month_path + "[4]") + ";" \ + findEntry(path, standalone_short_month_path + "[5]") + ";" \ + findEntry(path, standalone_short_month_path + "[6]") + ";" \ + findEntry(path, standalone_short_month_path + "[7]") + ";" \ + findEntry(path, standalone_short_month_path + "[8]") + ";" \ + findEntry(path, standalone_short_month_path + "[9]") + ";" \ + findEntry(path, standalone_short_month_path + "[10]") + ";" \ + findEntry(path, standalone_short_month_path + "[11]") + ";" \ + findEntry(path, standalone_short_month_path + "[12]") + ";" standalone_narrow_month_path = "dates/calendars/calendar[gregorian]/months/monthContext[stand-alone]/monthWidth[narrow]/month" result['standaloneNarrowMonths'] \ = findEntry(path, standalone_narrow_month_path + "[1]") + ";" \ + findEntry(path, standalone_narrow_month_path + "[2]") + ";" \ + findEntry(path, standalone_narrow_month_path + "[3]") + ";" \ + findEntry(path, standalone_narrow_month_path + "[4]") + ";" \ + findEntry(path, standalone_narrow_month_path + "[5]") + ";" \ + findEntry(path, standalone_narrow_month_path + "[6]") + ";" \ + findEntry(path, standalone_narrow_month_path + "[7]") + ";" \ + findEntry(path, standalone_narrow_month_path + "[8]") + ";" \ + findEntry(path, standalone_narrow_month_path + "[9]") + ";" \ + findEntry(path, standalone_narrow_month_path + "[10]") + ";" \ + findEntry(path, standalone_narrow_month_path + "[11]") + ";" \ + findEntry(path, standalone_narrow_month_path + "[12]") + ";" long_month_path = "dates/calendars/calendar[gregorian]/months/monthContext[format]/monthWidth[wide]/month" result['longMonths'] \ = findEntry(path, long_month_path + "[1]") + ";" \ + findEntry(path, long_month_path + "[2]") + ";" \ + findEntry(path, long_month_path + "[3]") + ";" \ + findEntry(path, long_month_path + "[4]") + ";" \ + findEntry(path, long_month_path + "[5]") + ";" \ + findEntry(path, long_month_path + "[6]") + ";" \ + findEntry(path, long_month_path + "[7]") + ";" \ + findEntry(path, long_month_path + "[8]") + ";" \ + findEntry(path, long_month_path + "[9]") + ";" \ + findEntry(path, long_month_path + "[10]") + ";" \ + findEntry(path, long_month_path + "[11]") + ";" \ + findEntry(path, long_month_path + "[12]") + ";" short_month_path = "dates/calendars/calendar[gregorian]/months/monthContext[format]/monthWidth[abbreviated]/month" result['shortMonths'] \ = findEntry(path, short_month_path + "[1]") + ";" \ + findEntry(path, short_month_path + "[2]") + ";" \ + findEntry(path, short_month_path 
+ "[3]") + ";" \ + findEntry(path, short_month_path + "[4]") + ";" \ + findEntry(path, short_month_path + "[5]") + ";" \ + findEntry(path, short_month_path + "[6]") + ";" \ + findEntry(path, short_month_path + "[7]") + ";" \ + findEntry(path, short_month_path + "[8]") + ";" \ + findEntry(path, short_month_path + "[9]") + ";" \ + findEntry(path, short_month_path + "[10]") + ";" \ + findEntry(path, short_month_path + "[11]") + ";" \ + findEntry(path, short_month_path + "[12]") + ";" narrow_month_path = "dates/calendars/calendar[gregorian]/months/monthContext[format]/monthWidth[narrow]/month" result['narrowMonths'] \ = findEntry(path, narrow_month_path + "[1]") + ";" \ + findEntry(path, narrow_month_path + "[2]") + ";" \ + findEntry(path, narrow_month_path + "[3]") + ";" \ + findEntry(path, narrow_month_path + "[4]") + ";" \ + findEntry(path, narrow_month_path + "[5]") + ";" \ + findEntry(path, narrow_month_path + "[6]") + ";" \ + findEntry(path, narrow_month_path + "[7]") + ";" \ + findEntry(path, narrow_month_path + "[8]") + ";" \ + findEntry(path, narrow_month_path + "[9]") + ";" \ + findEntry(path, narrow_month_path + "[10]") + ";" \ + findEntry(path, narrow_month_path + "[11]") + ";" \ + findEntry(path, narrow_month_path + "[12]") + ";" long_day_path = "dates/calendars/calendar[gregorian]/days/dayContext[format]/dayWidth[wide]/day" result['longDays'] \ = findEntry(path, long_day_path + "[sun]") + ";" \ + findEntry(path, long_day_path + "[mon]") + ";" \ + findEntry(path, long_day_path + "[tue]") + ";" \ + findEntry(path, long_day_path + "[wed]") + ";" \ + findEntry(path, long_day_path + "[thu]") + ";" \ + findEntry(path, long_day_path + "[fri]") + ";" \ + findEntry(path, long_day_path + "[sat]") + ";" short_day_path = "dates/calendars/calendar[gregorian]/days/dayContext[format]/dayWidth[abbreviated]/day" result['shortDays'] \ = findEntry(path, short_day_path + "[sun]") + ";" \ + findEntry(path, short_day_path + "[mon]") + ";" \ + findEntry(path, short_day_path + "[tue]") + ";" \ + findEntry(path, short_day_path + "[wed]") + ";" \ + findEntry(path, short_day_path + "[thu]") + ";" \ + findEntry(path, short_day_path + "[fri]") + ";" \ + findEntry(path, short_day_path + "[sat]") + ";" narrow_day_path = "dates/calendars/calendar[gregorian]/days/dayContext[format]/dayWidth[narrow]/day" result['narrowDays'] \ = findEntry(path, narrow_day_path + "[sun]") + ";" \ + findEntry(path, narrow_day_path + "[mon]") + ";" \ + findEntry(path, narrow_day_path + "[tue]") + ";" \ + findEntry(path, narrow_day_path + "[wed]") + ";" \ + findEntry(path, narrow_day_path + "[thu]") + ";" \ + findEntry(path, narrow_day_path + "[fri]") + ";" \ + findEntry(path, narrow_day_path + "[sat]") + ";" standalone_long_day_path = "dates/calendars/calendar[gregorian]/days/dayContext[stand-alone]/dayWidth[wide]/day" result['standaloneLongDays'] \ = findEntry(path, standalone_long_day_path + "[sun]") + ";" \ + findEntry(path, standalone_long_day_path + "[mon]") + ";" \ + findEntry(path, standalone_long_day_path + "[tue]") + ";" \ + findEntry(path, standalone_long_day_path + "[wed]") + ";" \ + findEntry(path, standalone_long_day_path + "[thu]") + ";" \ + findEntry(path, standalone_long_day_path + "[fri]") + ";" \ + findEntry(path, standalone_long_day_path + "[sat]") + ";" standalone_short_day_path = "dates/calendars/calendar[gregorian]/days/dayContext[stand-alone]/dayWidth[abbreviated]/day" result['standaloneShortDays'] \ = findEntry(path, standalone_short_day_path + "[sun]") + ";" \ + findEntry(path, standalone_short_day_path + 
"[mon]") + ";" \ + findEntry(path, standalone_short_day_path + "[tue]") + ";" \ + findEntry(path, standalone_short_day_path + "[wed]") + ";" \ + findEntry(path, standalone_short_day_path + "[thu]") + ";" \ + findEntry(path, standalone_short_day_path + "[fri]") + ";" \ + findEntry(path, standalone_short_day_path + "[sat]") + ";" standalone_narrow_day_path = "dates/calendars/calendar[gregorian]/days/dayContext[stand-alone]/dayWidth[narrow]/day" result['standaloneNarrowDays'] \ = findEntry(path, standalone_narrow_day_path + "[sun]") + ";" \ + findEntry(path, standalone_narrow_day_path + "[mon]") + ";" \ + findEntry(path, standalone_narrow_day_path + "[tue]") + ";" \ + findEntry(path, standalone_narrow_day_path + "[wed]") + ";" \ + findEntry(path, standalone_narrow_day_path + "[thu]") + ";" \ + findEntry(path, standalone_narrow_day_path + "[fri]") + ";" \ + findEntry(path, standalone_narrow_day_path + "[sat]") + ";" return result def addEscapes(s): result = '' for c in s: n = ord(c) if n < 128: result += c else: result += "\\x" result += "%02x" % (n) return result def unicodeStr(s): utf8 = s.encode('utf-8') return "<size>" + str(len(utf8)) + "</size><data>" + addEscapes(utf8) + "</data>" def usage(): print "Usage: cldr2qlocalexml.py <path-to-cldr-main>" sys.exit() def integrateWeekData(filePath): if not filePath.endswith(".xml"): return {} monFirstDayIn = findEntryInFile(filePath, "weekData/firstDay[day=mon]", attribute="territories")[0].split(" ") tueFirstDayIn = findEntryInFile(filePath, "weekData/firstDay[day=tue]", attribute="territories")[0].split(" ") wedFirstDayIn = findEntryInFile(filePath, "weekData/firstDay[day=wed]", attribute="territories")[0].split(" ") thuFirstDayIn = findEntryInFile(filePath, "weekData/firstDay[day=thu]", attribute="territories")[0].split(" ") friFirstDayIn = findEntryInFile(filePath, "weekData/firstDay[day=fri]", attribute="territories")[0].split(" ") satFirstDayIn = findEntryInFile(filePath, "weekData/firstDay[day=sat]", attribute="territories")[0].split(" ") sunFirstDayIn = findEntryInFile(filePath, "weekData/firstDay[day=sun]", attribute="territories")[0].split(" ") monWeekendStart = findEntryInFile(filePath, "weekData/weekendStart[day=mon]", attribute="territories")[0].split(" ") tueWeekendStart = findEntryInFile(filePath, "weekData/weekendStart[day=tue]", attribute="territories")[0].split(" ") wedWeekendStart = findEntryInFile(filePath, "weekData/weekendStart[day=wed]", attribute="territories")[0].split(" ") thuWeekendStart = findEntryInFile(filePath, "weekData/weekendStart[day=thu]", attribute="territories")[0].split(" ") friWeekendStart = findEntryInFile(filePath, "weekData/weekendStart[day=fri]", attribute="territories")[0].split(" ") satWeekendStart = findEntryInFile(filePath, "weekData/weekendStart[day=sat]", attribute="territories")[0].split(" ") sunWeekendStart = findEntryInFile(filePath, "weekData/weekendStart[day=sun]", attribute="territories")[0].split(" ") monWeekendEnd = findEntryInFile(filePath, "weekData/weekendEnd[day=mon]", attribute="territories")[0].split(" ") tueWeekendEnd = findEntryInFile(filePath, "weekData/weekendEnd[day=tue]", attribute="territories")[0].split(" ") wedWeekendEnd = findEntryInFile(filePath, "weekData/weekendEnd[day=wed]", attribute="territories")[0].split(" ") thuWeekendEnd = findEntryInFile(filePath, "weekData/weekendEnd[day=thu]", attribute="territories")[0].split(" ") friWeekendEnd = findEntryInFile(filePath, "weekData/weekendEnd[day=fri]", attribute="territories")[0].split(" ") satWeekendEnd = 
findEntryInFile(filePath, "weekData/weekendEnd[day=sat]", attribute="territories")[0].split(" ") sunWeekendEnd = findEntryInFile(filePath, "weekData/weekendEnd[day=sun]", attribute="territories")[0].split(" ") firstDayByCountryCode = {} for countryCode in monFirstDayIn: firstDayByCountryCode[countryCode] = "mon" for countryCode in tueFirstDayIn: firstDayByCountryCode[countryCode] = "tue" for countryCode in wedFirstDayIn: firstDayByCountryCode[countryCode] = "wed" for countryCode in thuFirstDayIn: firstDayByCountryCode[countryCode] = "thu" for countryCode in friFirstDayIn: firstDayByCountryCode[countryCode] = "fri" for countryCode in satFirstDayIn: firstDayByCountryCode[countryCode] = "sat" for countryCode in sunFirstDayIn: firstDayByCountryCode[countryCode] = "sun" weekendStartByCountryCode = {} for countryCode in monWeekendStart: weekendStartByCountryCode[countryCode] = "mon" for countryCode in tueWeekendStart: weekendStartByCountryCode[countryCode] = "tue" for countryCode in wedWeekendStart: weekendStartByCountryCode[countryCode] = "wed" for countryCode in thuWeekendStart: weekendStartByCountryCode[countryCode] = "thu" for countryCode in friWeekendStart: weekendStartByCountryCode[countryCode] = "fri" for countryCode in satWeekendStart: weekendStartByCountryCode[countryCode] = "sat" for countryCode in sunWeekendStart: weekendStartByCountryCode[countryCode] = "sun" weekendEndByCountryCode = {} for countryCode in monWeekendEnd: weekendEndByCountryCode[countryCode] = "mon" for countryCode in tueWeekendEnd: weekendEndByCountryCode[countryCode] = "tue" for countryCode in wedWeekendEnd: weekendEndByCountryCode[countryCode] = "wed" for countryCode in thuWeekendEnd: weekendEndByCountryCode[countryCode] = "thu" for countryCode in friWeekendEnd: weekendEndByCountryCode[countryCode] = "fri" for countryCode in satWeekendEnd: weekendEndByCountryCode[countryCode] = "sat" for countryCode in sunWeekendEnd: weekendEndByCountryCode[countryCode] = "sun" for (key,locale) in locale_database.iteritems(): countryCode = locale['country_code'] if countryCode in firstDayByCountryCode: locale_database[key]['firstDayOfWeek'] = firstDayByCountryCode[countryCode] else: locale_database[key]['firstDayOfWeek'] = firstDayByCountryCode["001"] if countryCode in weekendStartByCountryCode: locale_database[key]['weekendStart'] = weekendStartByCountryCode[countryCode] else: locale_database[key]['weekendStart'] = weekendStartByCountryCode["001"] if countryCode in weekendEndByCountryCode: locale_database[key]['weekendEnd'] = weekendEndByCountryCode[countryCode] else: locale_database[key]['weekendEnd'] = weekendEndByCountryCode["001"] if len(sys.argv) != 2: usage() cldr_dir = sys.argv[1] if not os.path.isdir(cldr_dir): usage() cldr_files = os.listdir(cldr_dir) locale_database = {} for file in cldr_files: try: l = generateLocaleInfo(cldr_dir + "/" + file) if not l: sys.stderr.write("skipping file \"" + file + "\"\n") continue except xpathlite.Error as e: sys.stderr.write("skipping file \"%s\" (%s)\n" % (file, str(e))) continue locale_database[(l['language_id'], l['script_id'], l['country_id'], l['variant_code'])] = l integrateWeekData(cldr_dir+"/../supplemental/supplementalData.xml") locale_keys = locale_database.keys() locale_keys.sort() cldr_version = 'unknown' ldml = open(cldr_dir+"/../dtd/ldml.dtd", "r") for line in ldml: if 'version cldrVersion CDATA #FIXED' in line: cldr_version = line.split('"')[1] print "<localeDatabase>" print " <version>" + cldr_version + "</version>" print " <languageList>" for id in 
enumdata.language_list: l = enumdata.language_list[id] print " <language>" print " <name>" + l[0] + "</name>" print " <id>" + str(id) + "</id>" print " <code>" + l[1] + "</code>" print " </language>" print " </languageList>" print " <scriptList>" for id in enumdata.script_list: l = enumdata.script_list[id] print " <script>" print " <name>" + l[0] + "</name>" print " <id>" + str(id) + "</id>" print " <code>" + l[1] + "</code>" print " </script>" print " </scriptList>" print " <countryList>" for id in enumdata.country_list: l = enumdata.country_list[id] print " <country>" print " <name>" + l[0] + "</name>" print " <id>" + str(id) + "</id>" print " <code>" + l[1] + "</code>" print " </country>" print " </countryList>" def _parseLocale(l): language = "AnyLanguage" script = "AnyScript" country = "AnyCountry" if l == "und": raise xpathlite.Error("we are treating unknown locale like C") items = l.split("_") language_code = items[0] if language_code != "und": language_id = enumdata.languageCodeToId(language_code) if language_id == -1: raise xpathlite.Error("unknown language code \"%s\"" % language_code) language = enumdata.language_list[language_id][0] if len(items) > 1: script_code = items[1] country_code = "" if len(items) > 2: country_code = items[2] if len(script_code) == 4: script_id = enumdata.scriptCodeToId(script_code) if script_id == -1: raise xpathlite.Error("unknown script code \"%s\"" % script_code) script = enumdata.script_list[script_id][0] else: country_code = script_code if country_code: country_id = enumdata.countryCodeToId(country_code) if country_id == -1: raise xpathlite.Error("unknown country code \"%s\"" % country_code) country = enumdata.country_list[country_id][0] return (language, script, country) print " <likelySubtags>" for ns in findTagsInFile(cldr_dir + "/../supplemental/likelySubtags.xml", "likelySubtags"): tmp = {} for data in ns[1:][0]: # ns looks like this: [u'likelySubtag', [(u'from', u'aa'), (u'to', u'aa_Latn_ET')]] tmp[data[0]] = data[1] try: (from_language, from_script, from_country) = _parseLocale(tmp[u"from"]) except xpathlite.Error as e: sys.stderr.write("skipping likelySubtag \"%s\" -> \"%s\" (%s)\n" % (tmp[u"from"], tmp[u"to"], str(e))) continue try: (to_language, to_script, to_country) = _parseLocale(tmp[u"to"]) except xpathlite.Error as e: sys.stderr.write("skipping likelySubtag \"%s\" -> \"%s\" (%s)\n" % (tmp[u"from"], tmp[u"to"], str(e))) continue # substitute according to http://www.unicode.org/reports/tr35/#Likely_Subtags if to_country == "AnyCountry" and from_country != to_country: to_country = from_country if to_script == "AnyScript" and from_script != to_script: to_script = from_script print " <likelySubtag>" print " <from>" print " <language>" + from_language + "</language>" print " <script>" + from_script + "</script>" print " <country>" + from_country + "</country>" print " </from>" print " <to>" print " <language>" + to_language + "</language>" print " <script>" + to_script + "</script>" print " <country>" + to_country + "</country>" print " </to>" print " </likelySubtag>" print " </likelySubtags>" print " <localeList>" print \ " <locale>\n\ <language>C</language>\n\ <languageEndonym></languageEndonym>\n\ <script>AnyScript</script>\n\ <country>AnyCountry</country>\n\ <countryEndonym></countryEndonym>\n\ <decimal>46</decimal>\n\ <group>44</group>\n\ <list>59</list>\n\ <percent>37</percent>\n\ <zero>48</zero>\n\ <minus>45</minus>\n\ <plus>43</plus>\n\ <exp>101</exp>\n\ <quotationStart>\"</quotationStart>\n\ <quotationEnd>\"</quotationEnd>\n\ 
<alternateQuotationStart>\'</alternateQuotationStart>\n\ <alternateQuotationEnd>\'</alternateQuotationEnd>\n\ <listPatternPartStart>%1, %2</listPatternPartStart>\n\ <listPatternPartMiddle>%1, %2</listPatternPartMiddle>\n\ <listPatternPartEnd>%1, %2</listPatternPartEnd>\n\ <listPatternPartTwo>%1, %2</listPatternPartTwo>\n\ <am>AM</am>\n\ <pm>PM</pm>\n\ <firstDayOfWeek>mon</firstDayOfWeek>\n\ <weekendStart>sat</weekendStart>\n\ <weekendEnd>sun</weekendEnd>\n\ <longDateFormat>EEEE, d MMMM yyyy</longDateFormat>\n\ <shortDateFormat>d MMM yyyy</shortDateFormat>\n\ <longTimeFormat>HH:mm:ss z</longTimeFormat>\n\ <shortTimeFormat>HH:mm:ss</shortTimeFormat>\n\ <standaloneLongMonths>January;February;March;April;May;June;July;August;September;October;November;December;</standaloneLongMonths>\n\ <standaloneShortMonths>Jan;Feb;Mar;Apr;May;Jun;Jul;Aug;Sep;Oct;Nov;Dec;</standaloneShortMonths>\n\ <standaloneNarrowMonths>J;F;M;A;M;J;J;A;S;O;N;D;</standaloneNarrowMonths>\n\ <longMonths>January;February;March;April;May;June;July;August;September;October;November;December;</longMonths>\n\ <shortMonths>Jan;Feb;Mar;Apr;May;Jun;Jul;Aug;Sep;Oct;Nov;Dec;</shortMonths>\n\ <narrowMonths>1;2;3;4;5;6;7;8;9;10;11;12;</narrowMonths>\n\ <longDays>Sunday;Monday;Tuesday;Wednesday;Thursday;Friday;Saturday;</longDays>\n\ <shortDays>Sun;Mon;Tue;Wed;Thu;Fri;Sat;</shortDays>\n\ <narrowDays>7;1;2;3;4;5;6;</narrowDays>\n\ <standaloneLongDays>Sunday;Monday;Tuesday;Wednesday;Thursday;Friday;Saturday;</standaloneLongDays>\n\ <standaloneShortDays>Sun;Mon;Tue;Wed;Thu;Fri;Sat;</standaloneShortDays>\n\ <standaloneNarrowDays>S;M;T;W;T;F;S;</standaloneNarrowDays>\n\ <currencyIsoCode></currencyIsoCode>\n\ <currencySymbol></currencySymbol>\n\ <currencyDisplayName>;;;;;;;</currencyDisplayName>\n\ <currencyDigits>2</currencyDigits>\n\ <currencyRounding>1</currencyRounding>\n\ <currencyFormat>%1%2</currencyFormat>\n\ <currencyNegativeFormat></currencyNegativeFormat>\n\ </locale>" for key in locale_keys: l = locale_database[key] print " <locale>" print " <language>" + l['language'] + "</language>" print " <languageEndonym>" + escape(l['language_endonym']).encode('utf-8') + "</languageEndonym>" print " <script>" + l['script'] + "</script>" print " <country>" + l['country'] + "</country>" print " <countryEndonym>" + escape(l['country_endonym']).encode('utf-8') + "</countryEndonym>" print " <languagecode>" + l['language_code'] + "</languagecode>" print " <scriptcode>" + l['script_code'] + "</scriptcode>" print " <countrycode>" + l['country_code'] + "</countrycode>" print " <decimal>" + ordStr(l['decimal']) + "</decimal>" print " <group>" + ordStr(l['group']) + "</group>" print " <list>" + fixOrdStrList(l['list']) + "</list>" print " <percent>" + fixOrdStrPercent(l['percent']) + "</percent>" print " <zero>" + ordStr(l['zero']) + "</zero>" print " <minus>" + fixOrdStrMinus(l['minus']) + "</minus>" print " <plus>" + fixOrdStrPlus(l['plus']) + "</plus>" print " <exp>" + fixOrdStrExp(l['exp']) + "</exp>" print " <quotationStart>" + l['quotationStart'].encode('utf-8') + "</quotationStart>" print " <quotationEnd>" + l['quotationEnd'].encode('utf-8') + "</quotationEnd>" print " <alternateQuotationStart>" + l['alternateQuotationStart'].encode('utf-8') + "</alternateQuotationStart>" print " <alternateQuotationEnd>" + l['alternateQuotationEnd'].encode('utf-8') + "</alternateQuotationEnd>" print " <listPatternPartStart>" + l['listPatternPartStart'].encode('utf-8') + "</listPatternPartStart>" print " <listPatternPartMiddle>" + 
l['listPatternPartMiddle'].encode('utf-8') + "</listPatternPartMiddle>" print " <listPatternPartEnd>" + l['listPatternPartEnd'].encode('utf-8') + "</listPatternPartEnd>" print " <listPatternPartTwo>" + l['listPatternPartTwo'].encode('utf-8') + "</listPatternPartTwo>" print " <am>" + l['am'].encode('utf-8') + "</am>" print " <pm>" + l['pm'].encode('utf-8') + "</pm>" print " <firstDayOfWeek>" + l['firstDayOfWeek'].encode('utf-8') + "</firstDayOfWeek>" print " <weekendStart>" + l['weekendStart'].encode('utf-8') + "</weekendStart>" print " <weekendEnd>" + l['weekendEnd'].encode('utf-8') + "</weekendEnd>" print " <longDateFormat>" + l['longDateFormat'].encode('utf-8') + "</longDateFormat>" print " <shortDateFormat>" + l['shortDateFormat'].encode('utf-8') + "</shortDateFormat>" print " <longTimeFormat>" + l['longTimeFormat'].encode('utf-8') + "</longTimeFormat>" print " <shortTimeFormat>" + l['shortTimeFormat'].encode('utf-8') + "</shortTimeFormat>" print " <standaloneLongMonths>" + l['standaloneLongMonths'].encode('utf-8') + "</standaloneLongMonths>" print " <standaloneShortMonths>"+ l['standaloneShortMonths'].encode('utf-8') + "</standaloneShortMonths>" print " <standaloneNarrowMonths>"+ l['standaloneNarrowMonths'].encode('utf-8') + "</standaloneNarrowMonths>" print " <longMonths>" + l['longMonths'].encode('utf-8') + "</longMonths>" print " <shortMonths>" + l['shortMonths'].encode('utf-8') + "</shortMonths>" print " <narrowMonths>" + l['narrowMonths'].encode('utf-8') + "</narrowMonths>" print " <longDays>" + l['longDays'].encode('utf-8') + "</longDays>" print " <shortDays>" + l['shortDays'].encode('utf-8') + "</shortDays>" print " <narrowDays>" + l['narrowDays'].encode('utf-8') + "</narrowDays>" print " <standaloneLongDays>" + l['standaloneLongDays'].encode('utf-8') + "</standaloneLongDays>" print " <standaloneShortDays>" + l['standaloneShortDays'].encode('utf-8') + "</standaloneShortDays>" print " <standaloneNarrowDays>" + l['standaloneNarrowDays'].encode('utf-8') + "</standaloneNarrowDays>" print " <currencyIsoCode>" + l['currencyIsoCode'].encode('utf-8') + "</currencyIsoCode>" print " <currencySymbol>" + l['currencySymbol'].encode('utf-8') + "</currencySymbol>" print " <currencyDisplayName>" + l['currencyDisplayName'].encode('utf-8') + "</currencyDisplayName>" print " <currencyDigits>" + str(l['currencyDigits']) + "</currencyDigits>" print " <currencyRounding>" + str(l['currencyRounding']) + "</currencyRounding>" print " <currencyFormat>" + l['currencyFormat'].encode('utf-8') + "</currencyFormat>" print " <currencyNegativeFormat>" + l['currencyNegativeFormat'].encode('utf-8') + "</currencyNegativeFormat>" print " </locale>" print " </localeList>" print "</localeDatabase>"
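A tiny standalone check of the list-pattern conversion defined near the top of this script; it is pure string work, so it needs no CLDR data (restated here with chained replaces, behavior unchanged):

def parse_list_pattern_part_format(pattern):
    # same substitution as above: CLDR {n} placeholders -> Qt %n placeholders
    return pattern.replace("{0}", "%1").replace("{1}", "%2").replace("{2}", "%3")

assert parse_list_pattern_part_format("{0}, {1}") == "%1, %2"
assert parse_list_pattern_part_format("{0} and {1}") == "%1 and %2"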
bsd-3-clause
cpanelli/-git-clone-https-chromium.googlesource.com-chromium-tools-depot_tools
third_party/pylint/lint.py
46
56500
# Copyright (c) 2003-2014 LOGILAB S.A. (Paris, FRANCE). # http://www.logilab.fr/ -- mailto:[email protected] # # This program is free software; you can redistribute it and/or modify it under # the terms of the GNU General Public License as published by the Free Software # Foundation; either version 2 of the License, or (at your option) any later # version. # # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details # # You should have received a copy of the GNU General Public License along with # this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. """ %prog [options] module_or_package Check that a module satisfies a coding standard (and more !). %prog --help Display this help message and exit. %prog --help-msg <msg-id>[,<msg-id>] Display help messages about given message identifiers and exit. """ from __future__ import print_function import collections import contextlib import itertools import operator import os try: import multiprocessing except ImportError: multiprocessing = None import sys import tokenize import warnings import astroid from astroid.__pkginfo__ import version as astroid_version from astroid import modutils from logilab.common import configuration from logilab.common import optik_ext from logilab.common import interface from logilab.common import textutils from logilab.common import ureports from logilab.common.__pkginfo__ import version as common_version import six from pylint import checkers from pylint import interfaces from pylint import reporters from pylint import utils from pylint import config from pylint.__pkginfo__ import version MANAGER = astroid.MANAGER def _get_new_args(message): location = ( message.abspath, message.path, message.module, message.obj, message.line, message.column, ) return ( message.msg_id, message.symbol, location, message.msg, message.confidence, ) def _get_python_path(filepath): dirname = os.path.realpath(os.path.expanduser(filepath)) if not os.path.isdir(dirname): dirname = os.path.dirname(dirname) while True: if not os.path.exists(os.path.join(dirname, "__init__.py")): return dirname old_dirname = dirname dirname = os.path.dirname(dirname) if old_dirname == dirname: return os.getcwd() def _merge_stats(stats): merged = {} for stat in stats: for key, item in six.iteritems(stat): if key not in merged: merged[key] = item else: if isinstance(item, dict): merged[key].update(item) else: merged[key] = merged[key] + item return merged # Python Linter class ######################################################### MSGS = { 'F0001': ('%s', 'fatal', 'Used when an error occurred preventing the analysis of a \ module (unable to find it for instance).'), 'F0002': ('%s: %s', 'astroid-error', 'Used when an unexpected error occurred while building the ' 'Astroid representation. This is usually accompanied by a ' 'traceback. 
Please report such errors !'), 'F0003': ('ignored builtin module %s', 'ignored-builtin-module', 'Used to indicate that the user asked to analyze a builtin ' 'module which has been skipped.'), 'F0010': ('error while code parsing: %s', 'parse-error', 'Used when an exception occured while building the Astroid ' 'representation which could be handled by astroid.'), 'I0001': ('Unable to run raw checkers on built-in module %s', 'raw-checker-failed', 'Used to inform that a built-in module has not been checked ' 'using the raw checkers.'), 'I0010': ('Unable to consider inline option %r', 'bad-inline-option', 'Used when an inline option is either badly formatted or can\'t ' 'be used inside modules.'), 'I0011': ('Locally disabling %s (%s)', 'locally-disabled', 'Used when an inline option disables a message or a messages ' 'category.'), 'I0012': ('Locally enabling %s (%s)', 'locally-enabled', 'Used when an inline option enables a message or a messages ' 'category.'), 'I0013': ('Ignoring entire file', 'file-ignored', 'Used to inform that the file will not be checked'), 'I0020': ('Suppressed %s (from line %d)', 'suppressed-message', 'A message was triggered on a line, but suppressed explicitly ' 'by a disable= comment in the file. This message is not ' 'generated for messages that are ignored due to configuration ' 'settings.'), 'I0021': ('Useless suppression of %s', 'useless-suppression', 'Reported when a message is explicitly disabled for a line or ' 'a block of code, but never triggered.'), 'I0022': ('Pragma "%s" is deprecated, use "%s" instead', 'deprecated-pragma', 'Some inline pylint options have been renamed or reworked, ' 'only the most recent form should be used. ' 'NOTE:skip-all is only available with pylint >= 0.26', {'old_names': [('I0014', 'deprecated-disable-all')]}), 'E0001': ('%s', 'syntax-error', 'Used when a syntax error is raised for a module.'), 'E0011': ('Unrecognized file option %r', 'unrecognized-inline-option', 'Used when an unknown inline option is encountered.'), 'E0012': ('Bad option value %r', 'bad-option-value', 'Used when a bad value for an inline option is encountered.'), } def _deprecated_option(shortname, opt_type): def _warn_deprecated(option, optname, *args): # pylint: disable=unused-argument sys.stderr.write('Warning: option %s is deprecated and ignored.\n' % (optname,)) return {'short': shortname, 'help': 'DEPRECATED', 'hide': True, 'type': opt_type, 'action': 'callback', 'callback': _warn_deprecated} if multiprocessing is not None: class ChildLinter(multiprocessing.Process): # pylint: disable=no-member def run(self): tasks_queue, results_queue, self._config = self._args # pylint: disable=no-member self._config["jobs"] = 1 # Child does not parallelize any further. # Run linter for received files/modules. for file_or_module in iter(tasks_queue.get, 'STOP'): result = self._run_linter(file_or_module[0]) try: results_queue.put(result) except Exception as ex: print("internal error with sending report for module %s" % file_or_module, file=sys.stderr) print(ex, file=sys.stderr) results_queue.put({}) def _run_linter(self, file_or_module): linter = PyLinter() # Register standard checkers. linter.load_default_plugins() # Load command line plugins. # TODO linter.load_plugin_modules(self._plugins) linter.load_configuration(**self._config) linter.set_reporter(reporters.CollectingReporter()) # Run the checks. 
linter.check(file_or_module) msgs = [_get_new_args(m) for m in linter.reporter.messages] return (file_or_module, linter.file_state.base_name, linter.current_name, msgs, linter.stats, linter.msg_status) class PyLinter(configuration.OptionsManagerMixIn, utils.MessagesHandlerMixIn, utils.ReportsHandlerMixIn, checkers.BaseTokenChecker): """lint Python modules using external checkers. This is the main checker controlling the other ones and the reports generation. It is itself both a raw checker and an astroid checker in order to: * handle message activation / deactivation at the module level * handle some basic but necessary stats'data (number of classes, methods...) IDE plugins developpers: you may have to call `astroid.builder.MANAGER.astroid_cache.clear()` accross run if you want to ensure the latest code version is actually checked. """ __implements__ = (interfaces.ITokenChecker, ) name = 'master' priority = 0 level = 0 msgs = MSGS @staticmethod def make_options(): return (('ignore', {'type' : 'csv', 'metavar' : '<file>[,<file>...]', 'dest' : 'black_list', 'default' : ('CVS',), 'help' : 'Add files or directories to the blacklist. ' 'They should be base names, not paths.'}), ('persistent', {'default': True, 'type' : 'yn', 'metavar' : '<y_or_n>', 'level': 1, 'help' : 'Pickle collected data for later comparisons.'}), ('load-plugins', {'type' : 'csv', 'metavar' : '<modules>', 'default' : (), 'level': 1, 'help' : 'List of plugins (as comma separated values of ' 'python modules names) to load, usually to register ' 'additional checkers.'}), ('output-format', {'default': 'text', 'type': 'string', 'metavar' : '<format>', 'short': 'f', 'group': 'Reports', 'help' : 'Set the output format. Available formats are text,' ' parseable, colorized, msvs (visual studio) and html. You ' 'can also give a reporter class, eg mypackage.mymodule.' 'MyReporterClass.'}), ('files-output', {'default': 0, 'type' : 'yn', 'metavar' : '<y_or_n>', 'group': 'Reports', 'level': 1, 'help' : 'Put messages in a separate file for each module / ' 'package specified on the command line instead of printing ' 'them on stdout. Reports (if any) will be written in a file ' 'name "pylint_global.[txt|html]".'}), ('reports', {'default': 1, 'type' : 'yn', 'metavar' : '<y_or_n>', 'short': 'r', 'group': 'Reports', 'help' : 'Tells whether to display a full report or only the ' 'messages'}), ('evaluation', {'type' : 'string', 'metavar' : '<python_expression>', 'group': 'Reports', 'level': 1, 'default': '10.0 - ((float(5 * error + warning + refactor + ' 'convention) / statement) * 10)', 'help' : 'Python expression which should return a note less ' 'than 10 (10 is the highest note). You have access ' 'to the variables errors warning, statement which ' 'respectively contain the number of errors / ' 'warnings messages and the total number of ' 'statements analyzed. This is used by the global ' 'evaluation report (RP0004).'}), ('comment', {'default': 0, 'type' : 'yn', 'metavar' : '<y_or_n>', 'group': 'Reports', 'level': 1, 'help' : 'Add a comment according to your evaluation note. ' 'This is used by the global evaluation report (RP0004).'}), ('confidence', {'type' : 'multiple_choice', 'metavar': '<levels>', 'default': '', 'choices': [c.name for c in interfaces.CONFIDENCE_LEVELS], 'group': 'Messages control', 'help' : 'Only show warnings with the listed confidence levels.' ' Leave empty to show all. 
Valid levels: %s' % ( ', '.join(c.name for c in interfaces.CONFIDENCE_LEVELS),)}), ('enable', {'type' : 'csv', 'metavar': '<msg ids>', 'short': 'e', 'group': 'Messages control', 'help' : 'Enable the message, report, category or checker with the ' 'given id(s). You can either give multiple identifier ' 'separated by comma (,) or put this option multiple time. ' 'See also the "--disable" option for examples. '}), ('disable', {'type' : 'csv', 'metavar': '<msg ids>', 'short': 'd', 'group': 'Messages control', 'help' : 'Disable the message, report, category or checker ' 'with the given id(s). You can either give multiple identifiers' ' separated by comma (,) or put this option multiple times ' '(only on the command line, not in the configuration file ' 'where it should appear only once).' 'You can also use "--disable=all" to disable everything first ' 'and then reenable specific checks. For example, if you want ' 'to run only the similarities checker, you can use ' '"--disable=all --enable=similarities". ' 'If you want to run only the classes checker, but have no ' 'Warning level messages displayed, use' '"--disable=all --enable=classes --disable=W"'}), ('msg-template', {'type' : 'string', 'metavar': '<template>', 'group': 'Reports', 'help' : ('Template used to display messages. ' 'This is a python new-style format string ' 'used to format the message information. ' 'See doc for all details') }), ('include-ids', _deprecated_option('i', 'yn')), ('symbols', _deprecated_option('s', 'yn')), ('jobs', {'type' : 'int', 'metavar': '<n-processes>', 'short': 'j', 'default': 1, 'help' : '''Use multiple processes to speed up Pylint.''', }), ('unsafe-load-any-extension', {'type': 'yn', 'metavar': '<yn>', 'default': False, 'hide': True, 'help': ('Allow loading of arbitrary C extensions. Extensions' ' are imported into the active Python interpreter and' ' may run arbitrary code.')}), ('extension-pkg-whitelist', {'type': 'csv', 'metavar': '<pkg[,pkg]>', 'default': [], 'help': ('A comma-separated list of package or module names' ' from where C extensions may be loaded. Extensions are' ' loading into the active Python interpreter and may run' ' arbitrary code')} ), ) option_groups = ( ('Messages control', 'Options controling analysis messages'), ('Reports', 'Options related to output formating and reporting'), ) def __init__(self, options=(), reporter=None, option_groups=(), pylintrc=None): # some stuff has to be done before ancestors initialization... 
# # messages store / checkers / reporter / astroid manager self.msgs_store = utils.MessagesStore() self.reporter = None self._reporter_name = None self._reporters = {} self._checkers = collections.defaultdict(list) self._pragma_lineno = {} self._ignore_file = False # visit variables self.file_state = utils.FileState() self.current_name = None self.current_file = None self.stats = None # init options self._external_opts = options self.options = options + PyLinter.make_options() self.option_groups = option_groups + PyLinter.option_groups self._options_methods = { 'enable': self.enable, 'disable': self.disable} self._bw_options_methods = {'disable-msg': self.disable, 'enable-msg': self.enable} full_version = '%%prog %s, \nastroid %s, common %s\nPython %s' % ( version, astroid_version, common_version, sys.version) configuration.OptionsManagerMixIn.__init__( self, usage=__doc__, version=full_version, config_file=pylintrc or config.PYLINTRC) utils.MessagesHandlerMixIn.__init__(self) utils.ReportsHandlerMixIn.__init__(self) checkers.BaseTokenChecker.__init__(self) # provided reports self.reports = (('RP0001', 'Messages by category', report_total_messages_stats), ('RP0002', '% errors / warnings by module', report_messages_by_module_stats), ('RP0003', 'Messages', report_messages_stats), ('RP0004', 'Global evaluation', self.report_evaluation), ) self.register_checker(self) self._dynamic_plugins = set() self.load_provider_defaults() if reporter: self.set_reporter(reporter) def load_default_plugins(self): checkers.initialize(self) reporters.initialize(self) # Make sure to load the default reporter, because # the option has been set before the plugins had been loaded. if not self.reporter: self._load_reporter() def load_plugin_modules(self, modnames): """take a list of module names which are pylint plugins and load and register them """ for modname in modnames: if modname in self._dynamic_plugins: continue self._dynamic_plugins.add(modname) module = modutils.load_module_from_name(modname) module.register(self) def _load_reporter(self): name = self._reporter_name.lower() if name in self._reporters: self.set_reporter(self._reporters[name]()) else: qname = self._reporter_name module = modutils.load_module_from_name( modutils.get_module_part(qname)) class_name = qname.split('.')[-1] reporter_class = getattr(module, class_name) self.set_reporter(reporter_class()) def set_reporter(self, reporter): """set the reporter used to display messages and reports""" self.reporter = reporter reporter.linter = self def set_option(self, optname, value, action=None, optdict=None): """overridden from configuration.OptionsProviderMixin to handle some special options """ if optname in self._options_methods or \ optname in self._bw_options_methods: if value: try: meth = self._options_methods[optname] except KeyError: meth = self._bw_options_methods[optname] warnings.warn('%s is deprecated, replace it by %s' % ( optname, optname.split('-')[0]), DeprecationWarning) value = optik_ext.check_csv(None, optname, value) if isinstance(value, (list, tuple)): for _id in value: meth(_id, ignore_unknown=True) else: meth(value) return # no need to call set_option, disable/enable methods do it elif optname == 'output-format': self._reporter_name = value # If the reporters are already available, load # the reporter class. 
if self._reporters: self._load_reporter() try: checkers.BaseTokenChecker.set_option(self, optname, value, action, optdict) except configuration.UnsupportedAction: print('option %s can\'t be read from config file' % \ optname, file=sys.stderr) def register_reporter(self, reporter_class): self._reporters[reporter_class.name] = reporter_class def report_order(self): reports = sorted(self._reports, key=lambda x: getattr(x, 'name', '')) try: # Remove the current reporter and add it # at the end of the list. reports.pop(reports.index(self)) except ValueError: pass else: reports.append(self) return reports # checkers manipulation methods ############################################ def register_checker(self, checker): """register a new checker checker is an object implementing IRawChecker or / and IAstroidChecker """ assert checker.priority <= 0, 'checker priority can\'t be >= 0' self._checkers[checker.name].append(checker) for r_id, r_title, r_cb in checker.reports: self.register_report(r_id, r_title, r_cb, checker) self.register_options_provider(checker) if hasattr(checker, 'msgs'): self.msgs_store.register_messages(checker) checker.load_defaults() # Register the checker, but disable all of its messages. # TODO(cpopa): we should have a better API for this. if not getattr(checker, 'enabled', True): self.disable(checker.name) def disable_noerror_messages(self): for msgcat, msgids in six.iteritems(self.msgs_store._msgs_by_category): if msgcat == 'E': for msgid in msgids: self.enable(msgid) else: for msgid in msgids: self.disable(msgid) def disable_reporters(self): """disable all reporters""" for reporters in six.itervalues(self._reports): for report_id, _, _ in reporters: self.disable_report(report_id) def error_mode(self): """error mode: enable only errors; no reports, no persistent""" self.disable_noerror_messages() self.disable('miscellaneous') self.set_option('reports', False) self.set_option('persistent', False) # block level option handling ############################################# # # see func_block_disable_msg.py test case for expected behaviour def process_tokens(self, tokens): """process tokens from the current module to search for module/block level options """ control_pragmas = {'disable', 'enable'} for (tok_type, content, start, _, _) in tokens: if tok_type != tokenize.COMMENT: continue match = utils.OPTION_RGX.search(content) if match is None: continue if match.group(1).strip() == "disable-all" or \ match.group(1).strip() == 'skip-file': if match.group(1).strip() == "disable-all": self.add_message('deprecated-pragma', line=start[0], args=('disable-all', 'skip-file')) self.add_message('file-ignored', line=start[0]) self._ignore_file = True return try: opt, value = match.group(1).split('=', 1) except ValueError: self.add_message('bad-inline-option', args=match.group(1).strip(), line=start[0]) continue opt = opt.strip() if opt in self._options_methods or opt in self._bw_options_methods: try: meth = self._options_methods[opt] except KeyError: meth = self._bw_options_methods[opt] # found a "(dis|en)able-msg" pragma deprecated suppresssion self.add_message('deprecated-pragma', line=start[0], args=(opt, opt.replace('-msg', ''))) for msgid in textutils.splitstrip(value): # Add the line where a control pragma was encountered. 
if opt in control_pragmas: self._pragma_lineno[msgid] = start[0] try: if (opt, msgid) == ('disable', 'all'): self.add_message('deprecated-pragma', line=start[0], args=('disable=all', 'skip-file')) self.add_message('file-ignored', line=start[0]) self._ignore_file = True return meth(msgid, 'module', start[0]) except utils.UnknownMessage: self.add_message('bad-option-value', args=msgid, line=start[0]) else: self.add_message('unrecognized-inline-option', args=opt, line=start[0]) # code checking methods ################################################### def get_checkers(self): """return all available checkers as a list""" return [self] + [c for checkers in six.itervalues(self._checkers) for c in checkers if c is not self] def prepare_checkers(self): """return checkers needed for activated messages and reports""" if not self.config.reports: self.disable_reporters() # get needed checkers neededcheckers = [self] for checker in self.get_checkers()[1:]: # fatal errors should not trigger enable / disabling a checker messages = set(msg for msg in checker.msgs if msg[0] != 'F' and self.is_message_enabled(msg)) if (messages or any(self.report_is_enabled(r[0]) for r in checker.reports)): neededcheckers.append(checker) # Sort checkers by priority neededcheckers = sorted(neededcheckers, key=operator.attrgetter('priority'), reverse=True) return neededcheckers def should_analyze_file(self, modname, path): # pylint: disable=unused-argument, no-self-use """Returns whether or not a module should be checked. This implementation returns True for all python source file, indicating that all files should be linted. Subclasses may override this method to indicate that modules satisfying certain conditions should not be linted. :param str modname: The name of the module to be checked. :param str path: The full path to the source code of the module. :returns: True if the module should be checked. :rtype: bool """ return path.endswith('.py') def check(self, files_or_modules): """main checking entry: check a list of files or modules from their name. """ # initialize msgs_state now that all messages have been registered into # the store for msg in self.msgs_store.messages: if not msg.may_be_emitted(): self._msgs_state[msg.msgid] = False if not isinstance(files_or_modules, (list, tuple)): files_or_modules = (files_or_modules,) if self.config.jobs == 1: with fix_import_path(files_or_modules): self._do_check(files_or_modules) else: # Hack that permits running pylint, on Windows, with -m switch # and with --jobs, as in 'python -2 -m pylint .. --jobs'. # For more details why this is needed, # see Python issue http://bugs.python.org/issue10845. mock_main = __name__ != '__main__' # -m switch if mock_main: sys.modules['__main__'] = sys.modules[__name__] try: self._parallel_check(files_or_modules) finally: if mock_main: sys.modules.pop('__main__') def _parallel_task(self, files_or_modules): # Prepare configuration for child linters. 
filter_options = {'symbols', 'include-ids', 'long-help'} filter_options.update([opt_name for opt_name, _ in self._external_opts]) config = {} for opt_providers in six.itervalues(self._all_options): for optname, optdict, val in opt_providers.options_and_values(): if optname not in filter_options: config[optname] = configuration.format_option_value(optdict, val) childs = [] manager = multiprocessing.Manager() # pylint: disable=no-member tasks_queue = manager.Queue() # pylint: disable=no-member results_queue = manager.Queue() # pylint: disable=no-member for _ in range(self.config.jobs): cl = ChildLinter(args=(tasks_queue, results_queue, config)) cl.start() # pylint: disable=no-member childs.append(cl) # send files to child linters for files_or_module in files_or_modules: tasks_queue.put([files_or_module]) # collect results from child linters failed = False for _ in files_or_modules: try: result = results_queue.get() except Exception as ex: print("internal error while receiving results from child linter", file=sys.stderr) print(ex, file=sys.stderr) failed = True break yield result # Stop child linters and wait for their completion. for _ in range(self.config.jobs): tasks_queue.put('STOP') for cl in childs: cl.join() if failed: print("Error occured, stopping the linter.", file=sys.stderr) sys.exit(32) def _parallel_check(self, files_or_modules): # Reset stats. self.open() all_stats = [] for result in self._parallel_task(files_or_modules): ( file_or_module, self.file_state.base_name, module, messages, stats, msg_status ) = result if file_or_module == files_or_modules[-1]: last_module = module for msg in messages: msg = utils.Message(*msg) self.set_current_module(module) self.reporter.handle_message(msg) all_stats.append(stats) self.msg_status |= msg_status self.stats = _merge_stats(itertools.chain(all_stats, [self.stats])) self.current_name = last_module # Insert stats data to local checkers. 
for checker in self.get_checkers(): if checker is not self: checker.stats = self.stats def _do_check(self, files_or_modules): walker = utils.PyLintASTWalker(self) checkers = self.prepare_checkers() tokencheckers = [c for c in checkers if interface.implements(c, interfaces.ITokenChecker) and c is not self] rawcheckers = [c for c in checkers if interface.implements(c, interfaces.IRawChecker)] # notify global begin for checker in checkers: checker.open() if interface.implements(checker, interfaces.IAstroidChecker): walker.add_checker(checker) # build ast and check modules or packages for descr in self.expand_files(files_or_modules): modname, filepath = descr['name'], descr['path'] if not descr['isarg'] and not self.should_analyze_file(modname, filepath): continue if self.config.files_output: reportfile = 'pylint_%s.%s' % (modname, self.reporter.extension) self.reporter.set_output(open(reportfile, 'w')) self.set_current_module(modname, filepath) # get the module representation ast_node = self.get_ast(filepath, modname) if ast_node is None: continue # XXX to be correct we need to keep module_msgs_state for every # analyzed module (the problem stands with localized messages which # are only detected in the .close step) self.file_state = utils.FileState(descr['basename']) self._ignore_file = False # fix the current file (if the source file was not available or # if it's actually a c extension) self.current_file = ast_node.file # pylint: disable=maybe-no-member self.check_astroid_module(ast_node, walker, rawcheckers, tokencheckers) # warn about spurious inline messages handling for msgid, line, args in self.file_state.iter_spurious_suppression_messages(self.msgs_store): self.add_message(msgid, line, None, args) # notify global end self.stats['statement'] = walker.nbstatements checkers.reverse() for checker in checkers: checker.close() def expand_files(self, modules): """get modules and errors from a list of modules and handle errors """ result, errors = utils.expand_modules(modules, self.config.black_list) for error in errors: message = modname = error["mod"] key = error["key"] self.set_current_module(modname) if key == "fatal": message = str(error["ex"]).replace(os.getcwd() + os.sep, '') self.add_message(key, args=message) return result def set_current_module(self, modname, filepath=None): """set the name of the currently analyzed module and init statistics for it """ if not modname and filepath is None: return self.reporter.on_set_current_module(modname, filepath) self.current_name = modname self.current_file = filepath or modname self.stats['by_module'][modname] = {} self.stats['by_module'][modname]['statement'] = 0 for msg_cat in six.itervalues(utils.MSG_TYPES): self.stats['by_module'][modname][msg_cat] = 0 def get_ast(self, filepath, modname): """return a ast(roid) representation for a module""" try: return MANAGER.ast_from_file(filepath, modname, source=True) except SyntaxError as ex: self.add_message('syntax-error', line=ex.lineno, args=ex.msg) except astroid.AstroidBuildingException as ex: self.add_message('parse-error', args=ex) except Exception as ex: # pylint: disable=broad-except import traceback traceback.print_exc() self.add_message('astroid-error', args=(ex.__class__, ex)) def check_astroid_module(self, ast_node, walker, rawcheckers, tokencheckers): """Check a module from its astroid representation.""" try: tokens = utils.tokenize_module(ast_node) except tokenize.TokenError as ex: self.add_message('syntax-error', line=ex.args[1][0], args=ex.args[0]) return if not 
ast_node.pure_python: self.add_message('raw-checker-failed', args=ast_node.name) else: #assert astroid.file.endswith('.py') # invoke ITokenChecker interface on self to fetch module/block # level options self.process_tokens(tokens) if self._ignore_file: return False # walk ast to collect line numbers self.file_state.collect_block_lines(self.msgs_store, ast_node) # run raw and tokens checkers for checker in rawcheckers: checker.process_module(ast_node) for checker in tokencheckers: checker.process_tokens(tokens) # generate events to astroid checkers walker.walk(ast_node) return True # IAstroidChecker interface ################################################# def open(self): """initialize counters""" self.stats = {'by_module' : {}, 'by_msg' : {}, } MANAGER.always_load_extensions = self.config.unsafe_load_any_extension MANAGER.extension_package_whitelist.update( self.config.extension_pkg_whitelist) for msg_cat in six.itervalues(utils.MSG_TYPES): self.stats[msg_cat] = 0 def generate_reports(self): """close the whole package /module, it's time to make reports ! if persistent run, pickle results for later comparison """ if self.file_state.base_name is not None: # load previous results if any previous_stats = config.load_results(self.file_state.base_name) # XXX code below needs refactoring to be more reporter agnostic self.reporter.on_close(self.stats, previous_stats) if self.config.reports: sect = self.make_reports(self.stats, previous_stats) if self.config.files_output: filename = 'pylint_global.' + self.reporter.extension self.reporter.set_output(open(filename, 'w')) else: sect = ureports.Section() if self.config.reports or self.config.output_format == 'html': self.reporter.display_results(sect) # save results if persistent run if self.config.persistent: config.save_results(self.stats, self.file_state.base_name) else: if self.config.output_format == 'html': # No output will be emitted for the html # reporter if the file doesn't exist, so emit # the results here. 
self.reporter.display_results(ureports.Section()) self.reporter.on_close(self.stats, {}) # specific reports ######################################################## def report_evaluation(self, sect, stats, previous_stats): """make the global evaluation report""" # check with at least check 1 statements (usually 0 when there is a # syntax error preventing pylint from further processing) if stats['statement'] == 0: raise utils.EmptyReport() # get a global note for the code evaluation = self.config.evaluation try: note = eval(evaluation, {}, self.stats) # pylint: disable=eval-used except Exception as ex: # pylint: disable=broad-except msg = 'An exception occurred while rating: %s' % ex else: stats['global_note'] = note msg = 'Your code has been rated at %.2f/10' % note pnote = previous_stats.get('global_note') if pnote is not None: msg += ' (previous run: %.2f/10, %+.2f)' % (pnote, note - pnote) if self.config.comment: msg = '%s\n%s' % (msg, config.get_note_message(note)) sect.append(ureports.Text(msg)) # some reporting functions #################################################### def report_total_messages_stats(sect, stats, previous_stats): """make total errors / warnings report""" lines = ['type', 'number', 'previous', 'difference'] lines += checkers.table_lines_from_stats(stats, previous_stats, ('convention', 'refactor', 'warning', 'error')) sect.append(ureports.Table(children=lines, cols=4, rheaders=1)) def report_messages_stats(sect, stats, _): """make messages type report""" if not stats['by_msg']: # don't print this report when we didn't detected any errors raise utils.EmptyReport() in_order = sorted([(value, msg_id) for msg_id, value in six.iteritems(stats['by_msg']) if not msg_id.startswith('I')]) in_order.reverse() lines = ('message id', 'occurrences') for value, msg_id in in_order: lines += (msg_id, str(value)) sect.append(ureports.Table(children=lines, cols=2, rheaders=1)) def report_messages_by_module_stats(sect, stats, _): """make errors / warnings by modules report""" if len(stats['by_module']) == 1: # don't print this report when we are analysing a single module raise utils.EmptyReport() by_mod = collections.defaultdict(dict) for m_type in ('fatal', 'error', 'warning', 'refactor', 'convention'): total = stats[m_type] for module in six.iterkeys(stats['by_module']): mod_total = stats['by_module'][module][m_type] if total == 0: percent = 0 else: percent = float((mod_total)*100) / total by_mod[module][m_type] = percent sorted_result = [] for module, mod_info in six.iteritems(by_mod): sorted_result.append((mod_info['error'], mod_info['warning'], mod_info['refactor'], mod_info['convention'], module)) sorted_result.sort() sorted_result.reverse() lines = ['module', 'error', 'warning', 'refactor', 'convention'] for line in sorted_result: # Don't report clean modules. 
if all(entry == 0 for entry in line[:-1]): continue lines.append(line[-1]) for val in line[:-1]: lines.append('%.2f' % val) if len(lines) == 5: raise utils.EmptyReport() sect.append(ureports.Table(children=lines, cols=5, rheaders=1)) # utilities ################################################################### class ArgumentPreprocessingError(Exception): """Raised if an error occurs during argument preprocessing.""" def preprocess_options(args, search_for): """look for some options (keys of <search_for>) which have to be processed before others values of <search_for> are callback functions to call when the option is found """ i = 0 while i < len(args): arg = args[i] if arg.startswith('--'): try: option, val = arg[2:].split('=', 1) except ValueError: option, val = arg[2:], None try: cb, takearg = search_for[option] except KeyError: i += 1 else: del args[i] if takearg and val is None: if i >= len(args) or args[i].startswith('-'): msg = 'Option %s expects a value' % option raise ArgumentPreprocessingError(msg) val = args[i] del args[i] elif not takearg and val is not None: msg = "Option %s doesn't expects a value" % option raise ArgumentPreprocessingError(msg) cb(option, val) else: i += 1 @contextlib.contextmanager def fix_import_path(args): """Prepare sys.path for running the linter checks. Within this context, each of the given arguments is importable. Paths are added to sys.path in corresponding order to the arguments. We avoid adding duplicate directories to sys.path. `sys.path` is reset to its original value upon exitign this context. """ orig = list(sys.path) changes = [] for arg in args: path = _get_python_path(arg) if path in changes: continue else: changes.append(path) sys.path[:] = changes + sys.path try: yield finally: sys.path[:] = orig class Run(object): """helper class to use as main for pylint : run(*sys.argv[1:]) """ LinterClass = PyLinter option_groups = ( ('Commands', 'Options which are actually commands. Options in this \ group are mutually exclusive.'), ) def __init__(self, args, reporter=None, exit=True): self._rcfile = None self._plugins = [] try: preprocess_options(args, { # option: (callback, takearg) 'init-hook': (cb_init_hook, True), 'rcfile': (self.cb_set_rcfile, True), 'load-plugins': (self.cb_add_plugins, True), }) except ArgumentPreprocessingError as ex: print(ex, file=sys.stderr) sys.exit(32) self.linter = linter = self.LinterClass(( ('rcfile', {'action' : 'callback', 'callback' : lambda *args: 1, 'type': 'string', 'metavar': '<file>', 'help' : 'Specify a configuration file.'}), ('init-hook', {'action' : 'callback', 'callback' : lambda *args: 1, 'type' : 'string', 'metavar': '<code>', 'level': 1, 'help' : 'Python code to execute, usually for sys.path ' 'manipulation such as pygtk.require().'}), ('help-msg', {'action' : 'callback', 'type' : 'string', 'metavar': '<msg-id>', 'callback' : self.cb_help_message, 'group': 'Commands', 'help' : 'Display a help message for the given message id and ' 'exit. 
The value may be a comma separated list of message ids.'}), ('list-msgs', {'action' : 'callback', 'metavar': '<msg-id>', 'callback' : self.cb_list_messages, 'group': 'Commands', 'level': 1, 'help' : "Generate pylint's messages."}), ('list-conf-levels', {'action' : 'callback', 'callback' : cb_list_confidence_levels, 'group': 'Commands', 'level': 1, 'help' : "Generate pylint's messages."}), ('full-documentation', {'action' : 'callback', 'metavar': '<msg-id>', 'callback' : self.cb_full_documentation, 'group': 'Commands', 'level': 1, 'help' : "Generate pylint's full documentation."}), ('generate-rcfile', {'action' : 'callback', 'callback' : self.cb_generate_config, 'group': 'Commands', 'help' : 'Generate a sample configuration file according to ' 'the current configuration. You can put other options ' 'before this one to get them in the generated ' 'configuration.'}), ('generate-man', {'action' : 'callback', 'callback' : self.cb_generate_manpage, 'group': 'Commands', 'help' : "Generate pylint's man page.", 'hide': True}), ('errors-only', {'action' : 'callback', 'callback' : self.cb_error_mode, 'short': 'E', 'help' : 'In error mode, checkers without error messages are ' 'disabled and for others, only the ERROR messages are ' 'displayed, and no reports are done by default'''}), ('py3k', {'action' : 'callback', 'callback' : self.cb_python3_porting_mode, 'help' : 'In Python 3 porting mode, all checkers will be ' 'disabled and only messages emitted by the porting ' 'checker will be displayed'}), ('profile', {'type' : 'yn', 'metavar' : '<y_or_n>', 'default': False, 'hide': True, 'help' : 'Profiled execution.'}), ), option_groups=self.option_groups, pylintrc=self._rcfile) # register standard checkers linter.load_default_plugins() # load command line plugins linter.load_plugin_modules(self._plugins) # add some help section linter.add_help_section('Environment variables', config.ENV_HELP, level=1) # pylint: disable=bad-continuation linter.add_help_section('Output', 'Using the default text output, the message format is : \n' ' \n' ' MESSAGE_TYPE: LINE_NUM:[OBJECT:] MESSAGE \n' ' \n' 'There are 5 kind of message types : \n' ' * (C) convention, for programming standard violation \n' ' * (R) refactor, for bad code smell \n' ' * (W) warning, for python specific problems \n' ' * (E) error, for probable bugs in the code \n' ' * (F) fatal, if an error occurred which prevented pylint from doing further\n' 'processing.\n' , level=1) linter.add_help_section('Output status code', 'Pylint should leave with following status code: \n' ' * 0 if everything went fine \n' ' * 1 if a fatal message was issued \n' ' * 2 if an error message was issued \n' ' * 4 if a warning message was issued \n' ' * 8 if a refactor message was issued \n' ' * 16 if a convention message was issued \n' ' * 32 on usage error \n' ' \n' 'status 1 to 16 will be bit-ORed so you can know which different categories has\n' 'been issued by analysing pylint output status code\n', level=1) # read configuration linter.disable('pointless-except') linter.disable('suppressed-message') linter.disable('useless-suppression') linter.read_config_file() config_parser = linter.cfgfile_parser # run init hook, if present, before loading plugins if config_parser.has_option('MASTER', 'init-hook'): cb_init_hook('init-hook', textutils.unquote(config_parser.get('MASTER', 'init-hook'))) # is there some additional plugins in the file configuration, in if config_parser.has_option('MASTER', 'load-plugins'): plugins = textutils.splitstrip( config_parser.get('MASTER', 
'load-plugins')) linter.load_plugin_modules(plugins) # now we can load file config and command line, plugins (which can # provide options) have been registered linter.load_config_file() if reporter: # if a custom reporter is provided as argument, it may be overridden # by file parameters, so re-set it here, but before command line # parsing so it's still overrideable by command line option linter.set_reporter(reporter) try: args = linter.load_command_line_configuration(args) except SystemExit as exc: if exc.code == 2: # bad options exc.code = 32 raise if not args: print(linter.help()) sys.exit(32) if linter.config.jobs < 0: print("Jobs number (%d) should be greater than 0" % linter.config.jobs, file=sys.stderr) sys.exit(32) if linter.config.jobs > 1 or linter.config.jobs == 0: if multiprocessing is None: print("Multiprocessing library is missing, " "fallback to single process", file=sys.stderr) linter.set_option("jobs", 1) else: if linter.config.jobs == 0: linter.config.jobs = multiprocessing.cpu_count() # insert current working directory to the python path to have a correct # behaviour if self.linter.config.profile: with fix_import_path(args): print('** profiled run', file=sys.stderr) import cProfile, pstats cProfile.runctx('linter.check(%r)' % args, globals(), locals(), 'stones.prof') data = pstats.Stats('stones.prof') data.strip_dirs() data.sort_stats('time', 'calls') data.print_stats(30) else: linter.check(args) linter.generate_reports() if exit: sys.exit(self.linter.msg_status) def cb_set_rcfile(self, name, value): """callback for option preprocessing (i.e. before option parsing)""" self._rcfile = value def cb_add_plugins(self, name, value): """callback for option preprocessing (i.e. before option parsing)""" self._plugins.extend(textutils.splitstrip(value)) def cb_error_mode(self, *args, **kwargs): """error mode: * disable all but error messages * disable the 'miscellaneous' checker which can be safely deactivated in debug * disable reports * do not save execution information """ self.linter.error_mode() def cb_generate_config(self, *args, **kwargs): """optik callback for sample config file generation""" self.linter.generate_config(skipsections=('COMMANDS',)) sys.exit(0) def cb_generate_manpage(self, *args, **kwargs): """optik callback for sample config file generation""" from pylint import __pkginfo__ self.linter.generate_manpage(__pkginfo__) sys.exit(0) def cb_help_message(self, option, optname, value, parser): """optik callback for printing some help about a particular message""" self.linter.msgs_store.help_message(textutils.splitstrip(value)) sys.exit(0) def cb_full_documentation(self, option, optname, value, parser): """optik callback for printing full documentation""" self.linter.print_full_documentation() sys.exit(0) def cb_list_messages(self, option, optname, value, parser): # FIXME """optik callback for printing available messages""" self.linter.msgs_store.list_messages() sys.exit(0) def cb_python3_porting_mode(self, *args, **kwargs): """Activate only the python3 porting checker.""" self.linter.disable('all') self.linter.enable('python3') def cb_list_confidence_levels(option, optname, value, parser): for level in interfaces.CONFIDENCE_LEVELS: print('%-18s: %s' % level) sys.exit(0) def cb_init_hook(optname, value): """exec arbitrary code to set sys.path for instance""" exec(value) # pylint: disable=exec-used if __name__ == '__main__': Run(sys.argv[1:])
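Because the module doubles as a library, the Run helper defined above can also be driven programmatically rather than through the __main__ entry point; a minimal sketch (the module path and options are illustrative):

from pylint.lint import Run

# exit=False keeps Run from calling sys.exit(), so the caller can
# read the bit-ORed message status off the linter afterwards.
runner = Run(['--errors-only', 'mypackage/mymodule.py'], exit=False)
print('pylint status: %d' % runner.linter.msg_status)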
bsd-3-clause
onceuponatimeforever/oh-mainline
vendor/packages/docutils/test/test_parsers/test_rst/test_directives/test_figures.py
16
7300
#! /usr/bin/env python # $Id: test_figures.py 7062 2011-06-30 22:14:29Z milde $ # Author: David Goodger <[email protected]> # Copyright: This module has been placed in the public domain. """ Tests for images.py figure directives. """ from __init__ import DocutilsTestSupport def suite(): s = DocutilsTestSupport.ParserTestSuite() s.generateTests(totest) return s totest = {} totest['figures'] = [ ["""\ .. figure:: picture.png """, """\ <document source="test data"> <figure> <image uri="picture.png"> """], ["""\ .. figure:: picture.png A picture with a caption. """, """\ <document source="test data"> <figure> <image uri="picture.png"> <caption> A picture with a caption. """], ["""\ .. figure:: picture.png - A picture with an invalid caption. """, """\ <document source="test data"> <figure> <image uri="picture.png"> <system_message level="3" line="1" source="test data" type="ERROR"> <paragraph> Figure caption must be a paragraph or empty comment. <literal_block xml:space="preserve"> .. figure:: picture.png \n\ - A picture with an invalid caption. """], ["""\ .. figure:: picture.png .. A picture with a legend but no caption. """, """\ <document source="test data"> <figure> <image uri="picture.png"> <legend> <paragraph> A picture with a legend but no caption. """], ["""\ .. Figure:: picture.png :height: 100 :width: 200 :scale: 50 A picture with image options and a caption. """, """\ <document source="test data"> <figure> <image height="100" scale="50" uri="picture.png" width="200"> <caption> A picture with image options and a caption. """], ["""\ .. Figure:: picture.png :height: 100 :alt: alternate text :width: 200 :scale: 50 :figwidth: 300 :figclass: class1 class2 :name: fig:pix A picture with image options on individual lines, and this caption. """, """\ <document source="test data"> <figure classes="class1 class2" width="300px"> <image alt="alternate text" height="100" ids="fig-pix" names="fig:pix" scale="50" uri="picture.png" width="200"> <caption> A picture with image options on individual lines, and this caption. """], ["""\ .. figure:: picture.png :align: center A figure with explicit alignment. """, """\ <document source="test data"> <figure align="center"> <image uri="picture.png"> <caption> A figure with explicit alignment. """], ["""\ .. figure:: picture.png :align: top A figure with wrong alignment. """, """\ <document source="test data"> <system_message level="3" line="1" source="test data" type="ERROR"> <paragraph> Error in "figure" directive: invalid option value: (option: "align"; value: 'top') "top" unknown; choose from "left", "center", or "right". <literal_block xml:space="preserve"> .. figure:: picture.png :align: top A figure with wrong alignment. """], ["""\ This figure lacks a caption. It may still have a "Figure 1."-style caption appended in the output. .. figure:: picture.png """, """\ <document source="test data"> <paragraph> This figure lacks a caption. It may still have a "Figure 1."-style caption appended in the output. <figure> <image uri="picture.png"> """], ["""\ .. figure:: picture.png A picture with a caption and a legend. +-----------------------+-----------------------+ | Symbol | Meaning | +=======================+=======================+ | .. image:: tent.png | Campground | +-----------------------+-----------------------+ | .. image:: waves.png | Lake | +-----------------------+-----------------------+ | .. 
image:: peak.png | Mountain | +-----------------------+-----------------------+ """, """\ <document source="test data"> <figure> <image uri="picture.png"> <caption> A picture with a caption and a legend. <legend> <table> <tgroup cols="2"> <colspec colwidth="23"> <colspec colwidth="23"> <thead> <row> <entry> <paragraph> Symbol <entry> <paragraph> Meaning <tbody> <row> <entry> <image uri="tent.png"> <entry> <paragraph> Campground <row> <entry> <image uri="waves.png"> <entry> <paragraph> Lake <row> <entry> <image uri="peak.png"> <entry> <paragraph> Mountain """], ["""\ .. figure:: picture.png .. A picture with a legend but no caption. (The empty comment replaces the caption, which must be a single paragraph.) """, """\ <document source="test data"> <figure> <image uri="picture.png"> <legend> <paragraph> A picture with a legend but no caption. (The empty comment replaces the caption, which must be a single paragraph.) """], ["""\ Testing for line-leaks: .. figure:: picture.png A picture with a caption. .. figure:: picture.png A picture with a caption. .. figure:: picture.png A picture with a caption. .. figure:: picture.png .. figure:: picture.png .. figure:: picture.png .. figure:: picture.png A picture with a caption. .. figure:: picture.png .. figure:: picture.png A picture with a caption. .. figure:: picture.png """, """\ <document source="test data"> <paragraph> Testing for line-leaks: <figure> <image uri="picture.png"> <caption> A picture with a caption. <figure> <image uri="picture.png"> <caption> A picture with a caption. <figure> <image uri="picture.png"> <caption> A picture with a caption. <figure> <image uri="picture.png"> <figure> <image uri="picture.png"> <figure> <image uri="picture.png"> <figure> <image uri="picture.png"> <caption> A picture with a caption. <figure> <image uri="picture.png"> <figure> <image uri="picture.png"> <caption> A picture with a caption. <figure> <image uri="picture.png"> """], ] if __name__ == '__main__': import unittest unittest.main(defaultTest='suite')
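The suite above is table-driven: each entry in totest['figures'] pairs an rST snippet with its expected pseudo-XML, and generateTests() turns every pair into a test case. A hypothetical extra entry would follow the same shape:

# Hypothetical additional case, same input/expected-output convention
# as the entries in totest['figures'] above.
totest['figures'].append(["""\
.. figure:: picture.png
   :width: 200

   A picture with a width option.
""",
"""\
<document source="test data">
    <figure>
        <image uri="picture.png" width="200">
        <caption>
            A picture with a width option.
"""])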
agpl-3.0
l00py/KML_Lookup
TA-KML_lookup/bin/shapely/topology.py
18
2257
""" Intermediaries supporting GEOS topological operations These methods all take Shapely geometries and other Python objects and delegate to GEOS functions via ctypes. These methods return ctypes objects that should be recast by the caller. """ from ctypes import byref, c_double from shapely.geos import TopologicalError, lgeos class Validating(object): def _validate(self, ob, stop_prepared=False): if ob is None or ob._geom is None: raise ValueError("Null geometry supports no operations") if stop_prepared and not hasattr(ob, 'type'): raise ValueError("Prepared geometries cannot be operated on") class Delegating(Validating): def __init__(self, name): self.fn = lgeos.methods[name] def _check_topology(self, err, *geoms): """Raise TopologicalError if geoms are invalid. Else, raise original error. """ for geom in geoms: if not geom.is_valid: raise TopologicalError( "The operation '%s' could not be performed. " "Likely cause is invalidity of the geometry %s" % ( self.fn.__name__, repr(geom))) raise err class BinaryRealProperty(Delegating): def __call__(self, this, other): self._validate(this) self._validate(other, stop_prepared=True) d = c_double() retval = self.fn(this._geom, other._geom, byref(d)) return d.value class UnaryRealProperty(Delegating): def __call__(self, this): self._validate(this) d = c_double() retval = self.fn(this._geom, byref(d)) return d.value class BinaryTopologicalOp(Delegating): def __call__(self, this, other, *args): self._validate(this) self._validate(other, stop_prepared=True) product = self.fn(this._geom, other._geom, *args) if product is None: err = TopologicalError( "This operation could not be performed. Reason: unknown") self._check_topology(err, this, other) return product class UnaryTopologicalOp(Delegating): def __call__(self, this, *args): self._validate(this) return self.fn(this._geom, *args)
mit
foreni-packages/golismero
thirdparty_libs/django/conf/locale/lt/formats.py
104
1503
# -*- encoding: utf-8 -*- # This file is distributed under the same license as the Django package. # from __future__ import unicode_literals # The *_FORMAT strings use the Django date format syntax, # see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date DATE_FORMAT = r'Y \m. E j \d.' TIME_FORMAT = 'H:i:s' DATETIME_FORMAT = r'Y \m. E j \d., H:i:s' YEAR_MONTH_FORMAT = r'Y \m. F' MONTH_DAY_FORMAT = r'E j \d.' SHORT_DATE_FORMAT = 'Y-m-d' SHORT_DATETIME_FORMAT = 'Y-m-d H:i' FIRST_DAY_OF_WEEK = 1 # Monday # The *_INPUT_FORMATS strings use the Python strftime format syntax, # see http://docs.python.org/library/datetime.html#strftime-strptime-behavior DATE_INPUT_FORMATS = ( '%Y-%m-%d', '%d.%m.%Y', '%d.%m.%y', # '2006-10-25', '25.10.2006', '25.10.06' ) TIME_INPUT_FORMATS = ( '%H:%M:%S', # '14:30:59' '%H:%M', # '14:30' '%H.%M.%S', # '14.30.59' '%H.%M', # '14.30' ) DATETIME_INPUT_FORMATS = ( '%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59' '%Y-%m-%d %H:%M', # '2006-10-25 14:30' '%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59' '%d.%m.%Y %H:%M', # '25.10.2006 14:30' '%d.%m.%Y', # '25.10.2006' '%d.%m.%y %H:%M:%S', # '25.10.06 14:30:59' '%d.%m.%y %H:%M', # '25.10.06 14:30' '%d.%m.%y %H.%M.%S', # '25.10.06 14.30.59' '%d.%m.%y %H.%M', # '25.10.06 14.30' '%d.%m.%y', # '25.10.06' ) DECIMAL_SEPARATOR = ',' THOUSAND_SEPARATOR = '.' NUMBER_GROUPING = 3
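These constants are looked up lazily by django.utils.formats when localization is active; a minimal sketch of exercising the Lithuanian formats (this assumes a configured Django settings module with USE_L10N = True):

import datetime
from django.utils import formats, translation

# Force the 'lt' locale so get_format() resolves to this module,
# then render a date with the SHORT_DATE_FORMAT defined above.
with translation.override('lt'):
    print(formats.date_format(datetime.date(2006, 10, 25),
                              'SHORT_DATE_FORMAT'))  # '2006-10-25'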
gpl-2.0
uclouvain/osis_louvain
base/business/education_groups/postponement.py
1
7431
############################################################################## # # OSIS stands for Open Student Information System. It's an application # designed to manage the core business of higher education institutions, # such as universities, faculties, institutes and professional schools. # The core business involves the administration of students, teachers, # courses, programs and so on. # # Copyright (C) 2015-2017 Université catholique de Louvain (http://www.uclouvain.be) # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # A copy of this license - GNU General Public License - is available # at the root of the source code of this program. If not, # see http://www.gnu.org/licenses/. # ############################################################################## from django import forms from django.db import Error from django.utils.translation import ugettext as _ from base.business.utils.model import model_to_dict_fk, compare_objects, update_object from base.models.academic_year import AcademicYear, current_academic_year from base.models.education_group_year import EducationGroupYear EDUCATION_GROUP_MAX_POSTPONE_YEARS = 6 FIELD_TO_EXCLUDE = ['id', 'external_id', 'academic_year'] class ConsistencyError(Error): def __init__(self, last_instance_updated, differences, *args, **kwargs): self.last_instance_updated = last_instance_updated self.differences = differences super().__init__(*args, **kwargs) def _compute_end_year(education_group): """ This function compute the end year that the postponement must achieve :arg education_group: The education group that we want to postpone """ # Compute max postponement based on config EDUCATION_GROUP_MAX_POSTPONE_YEARS max_postponement_end_year = current_academic_year().year + EDUCATION_GROUP_MAX_POSTPONE_YEARS if education_group.end_year: # Get the min [Prevent education_group.end_year > academic_year.year provided by system] max_postponement_end_year = min(max_postponement_end_year, education_group.end_year) # Lookup on database, get the latest existing education group year [Prevent desync end_date and data] latest_egy = education_group.educationgroupyear_set.select_related('academic_year') \ .order_by('academic_year__year').last() return max(max_postponement_end_year, latest_egy.academic_year.year) def _postpone_m2m(education_group_year, postponed_egy): fields_to_exclude = [] opts = education_group_year._meta for f in opts.many_to_many: if f.name in fields_to_exclude: continue m2m_cls = f.rel.through # Remove records of postponed_egy m2m_cls.objects.all().filter(education_group_year=postponed_egy).delete() # Recreate records for m2m_obj in m2m_cls.objects.all().filter(education_group_year_id=education_group_year): m2m_data_to_postpone = model_to_dict_fk(m2m_obj, exclude=['id', 'external_id', 'education_group_year']) m2m_cls(education_group_year=postponed_egy, **m2m_data_to_postpone).save() def duplicate_education_group_year(old_education_group_year, new_academic_year, dict_new_value=None, dict_initial_egy=None): if not dict_new_value: dict_new_value = model_to_dict_fk(old_education_group_year, 
exclude=FIELD_TO_EXCLUDE) defaults_values = {x: v for x, v in dict_new_value.items() if not isinstance(v, list)} postponed_egy, created = EducationGroupYear.objects.get_or_create( education_group=old_education_group_year.education_group, academic_year=new_academic_year, # Create object without m2m relations defaults=defaults_values ) # During create of new postponed object, we need to update only the m2m relations if created: # Postpone the m2m [languages / secondary_domains] _postpone_m2m(old_education_group_year, postponed_egy) # During the update, we need to check if the postponed object has been modify else: dict_postponed_egy = model_to_dict_fk(postponed_egy, exclude=FIELD_TO_EXCLUDE) differences = compare_objects(dict_initial_egy, dict_postponed_egy) \ if dict_initial_egy and dict_postponed_egy else {} if differences: raise ConsistencyError(postponed_egy, differences) update_object(postponed_egy, dict_new_value) # Postpone the m2m [languages / secondary_domains] _postpone_m2m(old_education_group_year, postponed_egy) return postponed_egy class PostponementEducationGroupYearMixin: """ This mixin will report the modification to the futures years. If one of the future year is already modified, it will stop the postponement and append a warning message """ field_to_exclude = FIELD_TO_EXCLUDE dict_initial_egy = {} def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.postpone_start_year = None self.postpone_end_year = None self.education_group_year_postponed = [] self.postponement_errors = {} self.warnings = [] if not self._is_creation(): self.dict_initial_egy = model_to_dict_fk( self.forms[forms.ModelForm].instance, exclude=self.field_to_exclude ) def save(self): education_group_year = super().save() self.postpone_start_year = education_group_year.academic_year.year self.postpone_end_year = _compute_end_year(education_group_year.education_group) self._start_postponement(education_group_year) return education_group_year def _start_postponement(self, education_group_year): dict_new_value = model_to_dict_fk(education_group_year, exclude=self.field_to_exclude) for academic_year in AcademicYear.objects.filter(year__gt=self.postpone_start_year, year__lte=self.postpone_end_year): try: postponed_egy = duplicate_education_group_year( education_group_year, academic_year, dict_new_value, self.dict_initial_egy ) self.education_group_year_postponed.append(postponed_egy) except ConsistencyError as e: self.add_postponement_errors(e) def add_postponement_errors(self, consistency_error): for difference in consistency_error.differences: error = _("%(col_name)s has been already modified.") % { "col_name": _(EducationGroupYear._meta.get_field(difference).verbose_name).title(), } self.warnings.append( _("Consistency error in %(academic_year)s : %(error)s") % { 'academic_year': consistency_error.last_instance_updated.academic_year, 'error': error } )
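A minimal sketch of calling the duplication helper above directly, outside the mixin — egy and next_year stand in for existing EducationGroupYear and AcademicYear instances, and a configured Django/OSIS environment is assumed:

from base.business.education_groups.postponement import (
    ConsistencyError,
    duplicate_education_group_year,
)

try:
    # Copy egy's data onto the matching education group year in
    # next_year, creating it (plus its m2m rows) if needed.
    copied = duplicate_education_group_year(egy, next_year)
except ConsistencyError as exc:
    # The future year was already edited by hand; exc.differences
    # maps the conflicting field names to their differing values.
    print('Not postponed: %s' % ', '.join(exc.differences))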
agpl-3.0