Dataset schema (column, dtype, observed length range / value range / distinct values):

repo_name       string   length 5 to 100
path            string   length 4 to 299
copies          string   990 distinct values
size            string   length 4 to 7
content         string   length 666 to 1.03M
license         string   15 distinct values
hash            int64    -9,223,351,895,964,839,000 to 9,223,297,778B
line_mean       float64  3.17 to 100
line_max        int64    7 to 1k
alpha_frac      float64  0.25 to 0.98
autogenerated   bool     1 class
oppia/oppia
extensions/interactions/MultipleChoiceInput/MultipleChoiceInput.py
4
2849
# coding: utf-8 # # Copyright 2014 The Oppia Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Python configuration for MultipleChoiceInput interaction.""" from __future__ import absolute_import # pylint: disable=import-only-modules from __future__ import unicode_literals # pylint: disable=import-only-modules from extensions.interactions import base class MultipleChoiceInput(base.BaseInteraction): """Interaction for multiple choice input.""" name = 'Multiple Choice' description = ( 'Allows learners to select one of a list of multiple-choice options.') display_mode = base.DISPLAY_MODE_INLINE _dependency_ids = [] answer_type = 'NonnegativeInt' instructions = None narrow_instructions = None needs_summary = False # Radio buttons get unselected when specifying a solution. This needs to be # fixed before solution feature can support this interaction. can_have_solution = False show_generic_submit_button = False _customization_arg_specs = [{ 'name': 'choices', 'description': 'Multiple Choice options', 'schema': { 'type': 'list', 'validators': [{ 'id': 'has_length_at_least', 'min_value': 1, }], 'items': { 'type': 'custom', 'obj_type': 'SubtitledHtml', 'replacement_ui_config': { 'html': { 'hide_complex_extensions': True, 'placeholder': ( 'Enter an option for the learner to select'), } } }, 'ui_config': { 'add_element_text': 'Add multiple choice option', } }, 'default_value': [{ 'content_id': None, 'html': '' }], }, { 'name': 'showChoicesInShuffledOrder', 'description': 'Shuffle answer choices', 'schema': { 'type': 'bool', }, 'default_value': True }] _answer_visualization_specs = [{ 'id': 'SortedTiles', 'options': {'header': 'Top answers', 'use_percentages': True}, 'calculation_id': 'AnswerFrequencies', 'addressed_info_is_supported': True, }]
apache-2.0
5,902,671,003,178,104,000
32.916667
79
0.585118
false
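The record above holds Oppia's declarative MultipleChoiceInput interaction definition; its behaviour is driven by the _customization_arg_specs list. As a minimal illustration of how such a spec list can be consumed, here is a self-contained sketch (the trimmed spec literal and the helper function are hypothetical simplifications, not Oppia's actual loader):

# Hypothetical sketch: derive default customization args from a spec list
# shaped like Oppia's _customization_arg_specs (heavily trimmed).
CUSTOMIZATION_ARG_SPECS = [
    {'name': 'choices',
     'default_value': [{'content_id': None, 'html': ''}]},
    {'name': 'showChoicesInShuffledOrder', 'default_value': True},
]

def default_customization_args(specs):
    """Map each spec's name to its declared default value."""
    return {spec['name']: spec['default_value'] for spec in specs}

print(default_customization_args(CUSTOMIZATION_ARG_SPECS))
# {'choices': [{'content_id': None, 'html': ''}], 'showChoicesInShuffledOrder': True}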
sourcepole/qgis
qgis/python/plugins/fTools/tools/doReProject.py
3
6615
# -*- coding: utf-8 -*- from PyQt4.QtCore import * from PyQt4.QtGui import * import ftools_utils from qgis.core import * from qgis.gui import * from ui_frmReProject import Ui_Dialog import types class Dialog(QDialog, Ui_Dialog): def __init__(self, iface): QDialog.__init__(self) self.iface = iface self.setupUi(self) QObject.connect(self.toolOut, SIGNAL("clicked()"), self.outFile) QObject.connect(self.btnProjection, SIGNAL("clicked()"), self.outProjFile) QObject.connect(self.inShape, SIGNAL("currentIndexChanged(QString)"), self.updateProj1) QObject.connect(self.cmbLayer, SIGNAL("currentIndexChanged(QString)"), self.updateProj2) self.setWindowTitle( self.tr("Export to new projection") ) self.buttonOk = self.buttonBox_2.button( QDialogButtonBox.Ok ) self.progressBar.setValue(0) mapCanvas = self.iface.mapCanvas() layers = ftools_utils.getLayerNames([QGis.Point, QGis.Line, QGis.Polygon]) self.inShape.addItems(layers) self.cmbLayer.addItems(layers) def updateProj1(self, layerName): self.inRef.clear() tempLayer = ftools_utils.getVectorLayerByName(layerName) crs = tempLayer.dataProvider().crs().toProj4() self.inRef.insert(unicode(crs)) def updateProj2(self, layerName): self.outRef.clear() tempLayer = ftools_utils.getVectorLayerByName(layerName) crs = tempLayer.dataProvider().crs().toProj4() self.outRef.insert(unicode(crs)) def accept(self): self.buttonOk.setEnabled( False ) if self.inShape.currentText() == "": QMessageBox.information(self, self.tr("Export to new projection"), self.tr("No input layer specified")) elif self.outShape.text() == "": QMessageBox.information(self, self.tr("Export to new projection"), self.tr("Please specify output shapefile")) elif self.txtProjection.text() == "" and self.rdoProjection.isChecked(): QMessageBox.information(self, self.tr("Define current projection"), self.tr("Please specify spatial reference system")) elif self.cmbLayer.currentText() == "" and self.rdoLayer.isChecked(): QMessageBox.information(self, self.tr("Define current projection"), self.tr("Please specify spatial reference system")) else: inName = self.inShape.currentText() self.progressBar.setValue(5) outPath = self.outShape.text() self.progressBar.setValue(10) if self.rdoProjection.isChecked(): outProj = self.txtProjection.text() else: outProj = self.cmbLayer.currentText() self.progressBar.setValue(15) if outPath.contains("\\"): outName = outPath.right((outPath.length() - outPath.lastIndexOf("\\")) - 1) else: outName = outPath.right((outPath.length() - outPath.lastIndexOf("/")) - 1) if outName.endsWith(".shp"): outName = outName.left(outName.length() - 4) if self.reProject(inName, unicode(outPath), unicode(outProj), self.rdoProjection.isChecked(), self.progressBar): self.outShape.clear() self.progressBar.setValue(100) addToTOC = QMessageBox.question(self, self.tr("Export to new projection"), self.tr("Created projected shapefile:\n%1\n\nWould you like to add the new layer to the TOC?").arg( outPath ), QMessageBox.Yes, QMessageBox.No, QMessageBox.NoButton) if addToTOC == QMessageBox.Yes: self.vlayer = QgsVectorLayer(outPath, unicode(outName), "ogr") QgsMapLayerRegistry.instance().addMapLayer(self.vlayer) self.progressBar.setValue(0) self.buttonOk.setEnabled( True ) def outProjFile(self): format = QString( "<h2>%1</h2>%2 <br/> %3" ) header = self.tr( "Choose output CRS:" ) sentence1 = self.tr( "Please select the projection system to be used by the output layer." ) sentence2 = self.tr( "Output layer will be projected from it's current CRS to the output CRS." 
) self.projSelect = QgsGenericProjectionSelector(self, Qt.Widget) self.projSelect.setMessage( format.arg( header ).arg( sentence1 ).arg( sentence2 )) if self.projSelect.exec_(): projString = self.projSelect.selectedProj4String() if projString == "": QMessageBox.information(self, self.tr("Export to new projection"), self.tr("No Valid CRS selected")) return else: self.txtProjection.clear() self.txtProjection.insert(projString) else: return def outFile(self): self.outShape.clear() ( self.shapefileName, self.encoding ) = ftools_utils.saveDialog( self ) if self.shapefileName is None or self.encoding is None: return self.outShape.setText( QString( self.shapefileName ) ) def reProject(self, inName, outPath, outProj, predefined, progressBar): vlayer = ftools_utils.getVectorLayerByName(inName) provider = vlayer.dataProvider() feat = QgsFeature() allAttrs = provider.attributeIndexes() progressBar.setValue(2) provider.select(allAttrs) fieldList = ftools_utils.getFieldList(vlayer) crsDest = QgsCoordinateReferenceSystem() if predefined: crsDest.createFromProj4(outProj) else: destLayer = ftools_utils.getVectorLayerByName(outProj) crsDest = destLayer.dataProvider().crs() if not crsDest.isValid(): QMessageBox.information(self, self.tr("Export to new projection"), self.tr("Output spatial reference system is not valid")) return False else: progressBar.setValue(5) crsSrc = provider.crs() if crsSrc != crsDest: xform = QgsCoordinateTransform(crsSrc, crsDest) progressBar.setValue(10) check = QFile(self.shapefileName) if check.exists(): if not QgsVectorFileWriter.deleteShapeFile(self.shapefileName): return error = QgsVectorFileWriter.writeAsShapefile(vlayer, self.shapefileName, self.encoding, crsDest, False) if error == QgsVectorFileWriter.NoError: return True else: return False else: QMessageBox.information(self, self.tr("Export to new projection"), self.tr("Identical output spatial reference system chosen")) return False
gpl-2.0
-5,447,309,672,979,438,000
50.27907
256
0.637944
false
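The record above is the fTools "Export to new projection" dialog (QGIS 1.x / PyQt4 API); its core job is transforming a layer from its current CRS to a chosen output CRS via QgsCoordinateTransform and QgsVectorFileWriter. The underlying reprojection idea can be sketched outside QGIS with pyproj; this is an illustrative stand-in under that assumption, not the plugin's own code path:

# Illustrative CRS reprojection with pyproj (assumed installed); the plugin
# itself uses QgsCoordinateTransform inside a running QGIS session.
from pyproj import Transformer

# WGS84 lon/lat -> Web Mercator; always_xy=True keeps (x, y) = (lon, lat) order.
transformer = Transformer.from_crs("EPSG:4326", "EPSG:3857", always_xy=True)
x, y = transformer.transform(13.9176, 45.9262)
print(round(x, 1), round(y, 1))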
postlund/home-assistant
tests/util/test_distance.py
14
2520
"""Test Home Assistant distance utility functions.""" import pytest from homeassistant.const import ( LENGTH_FEET, LENGTH_KILOMETERS, LENGTH_METERS, LENGTH_MILES, ) import homeassistant.util.distance as distance_util INVALID_SYMBOL = "bob" VALID_SYMBOL = LENGTH_KILOMETERS def test_convert_same_unit(): """Test conversion from any unit to same unit.""" assert distance_util.convert(5, LENGTH_KILOMETERS, LENGTH_KILOMETERS) == 5 assert distance_util.convert(2, LENGTH_METERS, LENGTH_METERS) == 2 assert distance_util.convert(10, LENGTH_MILES, LENGTH_MILES) == 10 assert distance_util.convert(9, LENGTH_FEET, LENGTH_FEET) == 9 def test_convert_invalid_unit(): """Test exception is thrown for invalid units.""" with pytest.raises(ValueError): distance_util.convert(5, INVALID_SYMBOL, VALID_SYMBOL) with pytest.raises(ValueError): distance_util.convert(5, VALID_SYMBOL, INVALID_SYMBOL) def test_convert_nonnumeric_value(): """Test exception is thrown for nonnumeric type.""" with pytest.raises(TypeError): distance_util.convert("a", LENGTH_KILOMETERS, LENGTH_METERS) def test_convert_from_miles(): """Test conversion from miles to other units.""" miles = 5 assert distance_util.convert(miles, LENGTH_MILES, LENGTH_KILOMETERS) == 8.04672 assert distance_util.convert(miles, LENGTH_MILES, LENGTH_METERS) == 8046.72 assert distance_util.convert(miles, LENGTH_MILES, LENGTH_FEET) == 26400.0008448 def test_convert_from_feet(): """Test conversion from feet to other units.""" feet = 5000 assert distance_util.convert(feet, LENGTH_FEET, LENGTH_KILOMETERS) == 1.524 assert distance_util.convert(feet, LENGTH_FEET, LENGTH_METERS) == 1524 assert distance_util.convert(feet, LENGTH_FEET, LENGTH_MILES) == 0.9469694040000001 def test_convert_from_kilometers(): """Test conversion from kilometers to other units.""" km = 5 assert distance_util.convert(km, LENGTH_KILOMETERS, LENGTH_FEET) == 16404.2 assert distance_util.convert(km, LENGTH_KILOMETERS, LENGTH_METERS) == 5000 assert distance_util.convert(km, LENGTH_KILOMETERS, LENGTH_MILES) == 3.106855 def test_convert_from_meters(): """Test conversion from meters to other units.""" m = 5000 assert distance_util.convert(m, LENGTH_METERS, LENGTH_FEET) == 16404.2 assert distance_util.convert(m, LENGTH_METERS, LENGTH_KILOMETERS) == 5 assert distance_util.convert(m, LENGTH_METERS, LENGTH_MILES) == 3.106855
apache-2.0
4,716,703,524,053,645,000
35.521739
87
0.713492
false
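The tests above pin down the behaviour of homeassistant.util.distance.convert(value, from_unit, to_unit): same-unit conversions are identities, unknown units raise ValueError, and non-numeric values raise TypeError. A minimal standalone sketch of the same factor-based conversion (a hypothetical helper, not Home Assistant's implementation):

# Metre-based unit conversion mirroring the behaviour asserted above
# (hypothetical helper, not Home Assistant's code).
TO_METERS = {"km": 1000.0, "m": 1.0, "mi": 1609.344, "ft": 0.3048}

def convert(value, from_unit, to_unit):
    if from_unit not in TO_METERS or to_unit not in TO_METERS:
        raise ValueError("unknown unit")
    if not isinstance(value, (int, float)):
        raise TypeError("value must be numeric")
    return value * TO_METERS[from_unit] / TO_METERS[to_unit]

print(convert(5, "mi", "km"))  # 8.04672, matching test_convert_from_miles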
sergiyprotsiv/openaddresses
scripts/si/addNames.py
5
3893
# -*- coding: utf-8 -*- import __future__ import csv, sys, json, copy, datetime, time, os def readDictionary(path, filename): dict = {} with open(os.path.join(path, filename)) as f: reader = csv.reader(f, delimiter=';') next(reader) # skip header for row in reader: dict[row[0]] = row[1].strip() #print(streets[row[1]]) print("Read %6d dictionary entries from %s" % (len(dict), filename)) return dict def main(path): # get timestamp from shapefiles timestamp = "" #datetime.datetime.now().strftime('%Y%m%d') with open(os.path.join(path, 'timestamp.txt'), 'r') as tsfile: timestamp = tsfile.read().replace('\n', '') # read dictionaries into memory as this is much faster to use then # (4 seconds vs 22 hours when processing by joining shapefiles using ogr2ogr on the same machine) # ==> street-names.csv <== # UL_MID;UL_UIME # 16183954;Župančičeva ulica # 16172839;Brezovce # 16183911;Liparjeva ulica streets = readDictionary(path, "street-names.csv") # ==> city-names.csv <== # NA_MID;NA_UIME # 10130000;Rovišče pri Studencu # 10084300;Podbreg # 10085900;Brdce cities = readDictionary(path, "city-names.csv") # ==> commune-names.csv <== # OB_MID;OB_UIME # 11027962;Novo mesto # 11027113;Mislinja # 11026982;Krško communes = readDictionary(path, "commune-names.csv") # ==> post-codes.csv <== # PT_MID;PT_ID # 21428698;8321 # 21431125;8222 # 21431656;5297 postcodes = readDictionary(path, "post-codes.csv") # ==> region-names.csv <== # SR_MID;SR_UIME # 21428205;Jugovzhodna Slovenija # 21428132;Pomurska # 21428248;Goriška regions = readDictionary(path, "region-names.csv") # ==> spatial-unit-region-mapping.csv <== # PO_MID;SR_MID # 10347912;21428248 # 10348471;21428248 # 10348463;21428248 spatregionmap = readDictionary(path, "spatial-unit-region-mapping.csv") # Main loop: transform # ==> addresses-noname.csv <== # X;Y;number;UL_MID;NA_MID;OB_MID;PO_MID;PT_MID;HS_MID # 13.917641089428125;45.926208653275097;64;16267520;10084270;11026516;10350212;21428337;11070205 # 13.917586202711593;45.926379094149475;65;16267520;10084270;11026516;10350212;21428337;11070213 # 13.917428708459077;45.926521570535712;67;16267520;10084270;11026516;10350212;21428337;11070230 # to: # ==> si-addresses-YYYYMMDD.csv <== # lon;lat;number;street;city;commune;region;postcode;id # 13.917641089428125;45.926208653275097;64;Otlica;Otlica;Ajdovščina;Goriška;5270;11070205 # 13.917586202711593;45.926379094149475;65;Otlica;Otlica;Ajdovščina;Goriška;5270;11070213 # 13.917428708459077;45.926521570535712;67;Otlica;Otlica;Ajdovščina;Goriška;5270;11070230 writer = csv.writer(open(os.path.join(path, 'si-addresses-{}.csv'.format(timestamp)), 'w'), delimiter=";") headers = ['lon', 'lat', 'number', 'street', 'city', 'commune', 'region', 'postcode', 'id'] writer.writerow(headers) with open(os.path.join(path, "addresses-noname.csv")) as f: reader = csv.reader(f, delimiter=';') next(reader) # skip header for rowIn in reader: rowOut = rowIn # round the coordinates to 7 decimals (roughly 1cm precision) rowOut[0] = round(float(rowIn[0]), 7) rowOut[1] = round(float(rowIn[1]), 7) # map IDs to values rowOut[3] = streets.get(rowIn[3], cities.get(rowIn[4], '??')) rowOut[4] = cities.get(rowIn[4], '??') rowOut[5] = communes.get(rowIn[5], '??') rowOut[6] = regions.get(spatregionmap.get(rowIn[6], '??'), '??') rowOut[7] = postcodes.get(rowIn[7], '??') writer.writerow(rowOut) if __name__ == '__main__': main(path=sys.argv[1])
bsd-3-clause
8,621,906,740,754,205,000
34.568807
110
0.632706
false
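The script above enriches a Slovenian address table by joining ID-keyed CSV dictionaries (street, city, commune, region and post-code names) onto each row. The lookup pattern it relies on can be shown in isolation; the file name and ID below are examples taken from the comments in the script:

# Sketch of the ID -> name join used above; 'street-names.csv' is the
# semicolon-delimited two-column file described in the script's comments.
import csv

def read_dictionary(path):
    """Read a two-column, ';'-delimited CSV into an {id: name} mapping."""
    with open(path, newline='', encoding='utf-8') as f:
        reader = csv.reader(f, delimiter=';')
        next(reader)  # skip the header row
        return {row[0]: row[1].strip() for row in reader}

streets = read_dictionary('street-names.csv')
# Unknown IDs fall back to '??', exactly as in the script's main loop.
print(streets.get('16183954', '??'))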
neurodata/ndstore
scripts/ingest/kanno/denseingest.py
2
3846
# Copyright 2014 NeuroData (http://neurodata.io) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import sys import os import numpy as np from PIL import Image import urllib, urllib2 import cStringIO import collections import zlib import kanno_cy # # ingest the PNG files into the database # """This file is super-customized for the kasthuri annotations data. Probably the biggest idiosyncracy is the handling of slices. They start at 1 and the database aligns slices 1..16, 17..32, etc. So, we try to ingest in that pattern.""" # Stuff we make take from a config or the command line in the future _xtilesz = 10748 _ytilesz = 12896 _startslice = 1840 _endslice = 1849 _prefix = 'Thousands_Bobbysdendrite_404_tb_export_s' #_batchsz = 16 _batchsz = 16 # Shape that we want to ingest into the database. # This should be aligned to the database cube size to perform best. _zingestsz = 16 _yingestsz = 1024 _xingestsz = 1024 def main(): parser = argparse.ArgumentParser(description='Ingest the kasthuri11 dataset annotations.') parser.add_argument('token', action="store", help='Token for the annotation project.') parser.add_argument('resolution', action="store", help='Resolution of the ingest data.') parser.add_argument('path', action="store", help='Directory with annotation PNG files.') result = parser.parse_args() # Get a list of the files in the directories for sl in range (_startslice,_endslice+1,_batchsz): newdata = np.zeros ( [ _batchsz, _ytilesz, _xtilesz ], dtype=np.uint32 ) for b in range ( _batchsz ): if ( sl + b <= _endslice ): filenm = result.path + '/' + _prefix + '{:0>4}'.format(sl+b) + '.png' print filenm tileimage = Image.open ( filenm, 'r' ) imgdata = np.asarray ( tileimage ) newdata[b,:,:] = kanno_cy.pngto32 ( imgdata ) # the last z offset that we ingest, if the batch ends before _batchsz endz = b zlow = sl+1 zhigh = sl+endz+2 ylow = 0 yhigh = _ytilesz xlow = 0 xhigh = _xtilesz # Send a cube at a time to the database for z in range ( zlow, zhigh, _zingestsz ): for y in range ( ylow, yhigh, _yingestsz ): for x in range ( xlow, xhigh, _xingestsz ): # cutout the data data = newdata[ z-zlow:min(zhigh,z+_zingestsz)-zlow,\ y-ylow:min(yhigh,y+_yingestsz)-ylow,\ x-xlow:min(xhigh,x+_xingestsz)-xlow] # check if there's anything to store if ( np.count_nonzero(data) != 0 ): url = 'http://localhost/ocp/ca/%s/npz/%s/%s,%s/%s,%s/%s,%s/' % ( result.token, result.resolution, x, min(xhigh,x+_xingestsz), y, min(yhigh,y+_yingestsz), z, min(zhigh,z+_zingestsz )) print url # Encode the voxelist an pickle fileobj = cStringIO.StringIO () np.save ( fileobj, data ) cdz = zlib.compress (fileobj.getvalue()) # Build the post request req = urllib2.Request(url, cdz) response = urllib2.urlopen(req) the_page = response.read() if __name__ == "__main__": main()
apache-2.0
-363,374,322,400,247,700
31.871795
198
0.617525
false
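The ingest script above is Python 2 (urllib2, cStringIO, print statements); for each cube it serializes a NumPy array with np.save, compresses the bytes with zlib and POSTs them to the cutout URL. That serialize-compress-POST step can be sketched in Python 3 as follows; the URL is a placeholder and this is not an ndstore client API:

# Python 3 sketch of the "np.save -> zlib.compress -> HTTP POST" step above.
# The URL below is a placeholder, not a live ndstore endpoint.
import io
import zlib
import urllib.request

import numpy as np

data = np.zeros((16, 1024, 1024), dtype=np.uint32)  # one ingest cube

buf = io.BytesIO()
np.save(buf, data)                       # serialize the cube in .npy format
payload = zlib.compress(buf.getvalue())  # compress before upload

req = urllib.request.Request(
    "http://localhost/ocp/ca/TOKEN/npz/0/0,1024/0,1024/0,16/", data=payload)
# response = urllib.request.urlopen(req)  # needs a live server to run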
0ps/plecost
plecost_lib/doc/en/source/conf.py
2
10274
# -*- coding: utf-8 -*- # # Plecost documentation build configuration file, created by # sphinx-quickstart on Wed Feb 11 01:21:33 2015. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys import os import sphinx_rtd_theme # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.insert(0, os.path.abspath('.')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'sphinx.ext.autodoc', ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'Plecost' copyright = u'2015, Iniqua Team' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '1.0.0' # The full version, including alpha/beta/rc tags. release = '1.0.0' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [] # The reST default role (used for this markup: `text`) to use for all # documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. #keep_warnings = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'sphinx_rtd_theme' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. 
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. #html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'Plecost' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ ('index', 'Plecost.tex', u'Plecost Documentation', u'Iniqua Team', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. 
#latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'Plecost', u'Plecost Documentation', [u'Iniqua Team'], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'Plecost', u'Plecost Documentation', u'Iniqua Team', 'Plecost', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. #texinfo_no_detailmenu = False # -- Options for Epub output ---------------------------------------------- # Bibliographic Dublin Core info. epub_title = u'Plecost' epub_author = u'Iniqua Team' epub_publisher = u'Iniqua Team' epub_copyright = u'2015, Iniqua Team' # The basename for the epub file. It defaults to the project name. #epub_basename = u'Plecost' # The HTML theme for the epub output. Since the default themes are not optimized # for small screen space, using the same theme for HTML and epub output is # usually not wise. This defaults to 'epub', a theme designed to save visual # space. #epub_theme = 'epub' # The language of the text. It defaults to the language option # or en if the language is not set. #epub_language = '' # The scheme of the identifier. Typical schemes are ISBN or URL. #epub_scheme = '' # The unique identifier of the text. This can be a ISBN number # or the project homepage. #epub_identifier = '' # A unique identification for the text. #epub_uid = '' # A tuple containing the cover image and cover page html template filenames. #epub_cover = () # A sequence of (type, uri, title) tuples for the guide element of content.opf. #epub_guide = () # HTML files that should be inserted before the pages created by sphinx. # The format is a list of tuples containing the path and title. #epub_pre_files = [] # HTML files shat should be inserted after the pages created by sphinx. # The format is a list of tuples containing the path and title. #epub_post_files = [] # A list of files that should not be packed into the epub file. epub_exclude_files = ['search.html'] # The depth of the table of contents in toc.ncx. #epub_tocdepth = 3 # Allow duplicate toc entries. #epub_tocdup = True # Choose between 'default' and 'includehidden'. #epub_tocscope = 'default' # Fix unsupported image types using the PIL. #epub_fix_images = False # Scale large images. #epub_max_image_width = 0 # How to display URL addresses: 'footnote', 'no', or 'inline'. #epub_show_urls = 'inline' # If false, no index is generated. #epub_use_index = True
bsd-3-clause
6,861,128,514,571,701,000
30.039275
80
0.707806
false
memeshack/lolgit
hangups/message_parser.py
5
4754
"""Parser for message formatting markup.""" import re from reparser import Parser, Token, MatchGroup from hangups.schemas import SegmentType # Common regex patterns boundary_chars = r'\s`!()\[\]{{}};:\'".,<>?«»“”‘’*_~=' b_left = r'(?:(?<=[' + boundary_chars + r'])|(?<=^))' # Lookbehind b_right = r'(?:(?=[' + boundary_chars + r'])|(?=$))' # Lookahead # Regex patterns used by token definitions markdown_start = b_left + r'(?<!\\){tag}(?!\s)(?!{tag})' markdown_end = r'(?<!{tag})(?<!\s)(?<!\\){tag}' + b_right markdown_link = r'(?<!\\)\[(?P<link>.+?)\]\((?P<url>.+?)\)' html_start = r'(?i)<{tag}>' html_end = r'(?i)</{tag}>' html_link = r'(?i)<a\s+href=[\'"](?P<url>.+?)[\'"]\s*>(?P<link>.+?)</a>' html_img = r'(?i)<img\s+src=[\'"](?P<url>.+?)[\'"]\s*/?>' html_newline = r'(?i)<br\s*/?>' newline = r'\n|\r\n' # Based on URL regex pattern by John Gruber # (http://gist.github.com/gruber/249502) auto_link = (r'(?i)\b(' r'(?:[a-z][\w-]+:(?:/{1,3}|[a-z0-9%])|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4}/)' r'(?:[^\s()<>]|\((?:[^\s()<>]|(?:\([^\s()<>]+\)))*\))+' r'(?:\((?:[^\s()<>]|(?:\([^\s()<>]+\)))*\)|[^\s`!()\[\]{};:\'".,<>?«»“”‘’]))') # Precompiled regex for matching protocol part of URL url_proto_regex = re.compile(r'(?i)^[a-z][\w-]+:/{1,3}') # Precompiled regex for removing backslash before escaped Markdown tags markdown_unescape_regex = re.compile(r'\\([*_~=`\[])') def markdown(tag): """Return start and end regex pattern sequences for simple Markdown tag.""" return (markdown_start.format(tag=tag), markdown_end.format(tag=tag)) def html(tag): """Return sequence of start and end regex patterns for simple HTML tag""" return (html_start.format(tag=tag), html_end.format(tag=tag)) def url_complete(url): """If URL doesn't start with protocol, prepend it with http://""" return url if url_proto_regex.search(url) else 'http://' + url class Tokens: """Groups of tokens to be used by ChatMessageParser""" basic = [ Token('link', auto_link, link_target=MatchGroup('start', func=url_complete)), Token('br', newline, text='\n', segment_type=SegmentType.LINE_BREAK) ] markdown = [ Token('md_bi1', *markdown(r'\*\*\*'), is_bold=True, is_italic=True), Token('md_bi2', *markdown(r'___'), is_bold=True, is_italic=True), Token('md_b1', *markdown(r'\*\*'), is_bold=True), Token('md_b2', *markdown(r'__'), is_bold=True), Token('md_i1', *markdown(r'\*'), is_italic=True), Token('md_i2', *markdown(r'_'), is_italic=True), Token('md_pre3', *markdown(r'```'), skip=True), Token('md_pre2', *markdown(r'``'), skip=True), Token('md_pre1', *markdown(r'`'), skip=True), Token('md_s', *markdown(r'~~'), is_strikethrough=True), Token('md_u', *markdown(r'=='), is_underline=True), Token('md_link', markdown_link, text=MatchGroup('link'), link_target=MatchGroup('url', func=url_complete)) ] html = [ Token('html_b1', *html(r'b'), is_bold=True), Token('html_b2', *html(r'strong'), is_bold=True), Token('html_i1', *html(r'i'), is_italic=True), Token('html_i2', *html(r'em'), is_italic=True), Token('html_s1', *html(r's'), is_strikethrough=True), Token('html_s2', *html(r'strike'), is_strikethrough=True), Token('html_s3', *html(r'del'), is_strikethrough=True), Token('html_u1', *html(r'u'), is_underline=True), Token('html_u2', *html(r'ins'), is_underline=True), Token('html_u3', *html(r'mark'), is_underline=True), Token('html_pre', *html(r'pre'), skip=True), Token('html_link', html_link, text=MatchGroup('link'), link_target=MatchGroup('url', func=url_complete)), Token('html_img', html_img, text=MatchGroup('url'), link_target=MatchGroup('url', func=url_complete)), 
Token('html_br', html_newline, text='\n', segment_type=SegmentType.LINE_BREAK) ] class ChatMessageParser(Parser): """Chat message parser""" def __init__(self, tokens=Tokens.markdown + Tokens.html + Tokens.basic): super().__init__(tokens) def preprocess(self, text): """Preprocess text before parsing""" # Replace two consecutive spaces with space and non-breakable space # (this is how original Hangouts client does it to preserve multiple # spaces) return text.replace(' ', ' \xa0') def postprocess(self, text): """Postprocess text after parsing""" # Remove backslash before escaped Markdown tags return markdown_unescape_regex.sub(r'\1', text)
mit
6,174,893,147,945,424,000
40.526316
93
0.555556
false
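The module above defines regex token groups and a reparser-based ChatMessageParser for Hangouts message markup. A rough usage sketch, assuming hangups is importable and that reparser's Parser exposes a parse() method returning formatted segments (which is how hangups consumes it elsewhere); url_complete is taken directly from the module shown:

# Rough usage sketch; parse() returning segment objects is an assumption
# about reparser's Parser, which ChatMessageParser subclasses.
from hangups.message_parser import ChatMessageParser, url_complete

print(url_complete('example.com'))          # -> 'http://example.com'
print(url_complete('https://example.com'))  # already has a protocol; unchanged

parser = ChatMessageParser()
segments = parser.parse('A **bold** word and a link to example.com')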
googleads/google-ads-python
google/ads/googleads/v7/enums/types/access_invitation_status.py
1
1194
# -*- coding: utf-8 -*- # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import proto # type: ignore __protobuf__ = proto.module( package="google.ads.googleads.v7.enums", marshal="google.ads.googleads.v7", manifest={"AccessInvitationStatusEnum",}, ) class AccessInvitationStatusEnum(proto.Message): r"""Container for enum for identifying the status of access invitation """ class AccessInvitationStatus(proto.Enum): r"""Possible access invitation status of a user""" UNSPECIFIED = 0 UNKNOWN = 1 PENDING = 2 DECLINED = 3 EXPIRED = 4 __all__ = tuple(sorted(__protobuf__.manifest))
apache-2.0
-1,475,745,770,913,457,400
28.85
74
0.69598
false
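The generated proto-plus module above only declares the enum container and its values. Referencing a value goes through the nested enum class; a small sketch, assuming the import path mirrors the file path recorded in this row:

# Hedged sketch: reading a value from the generated enum module above.
from google.ads.googleads.v7.enums.types import access_invitation_status

Status = (access_invitation_status.AccessInvitationStatusEnum
          .AccessInvitationStatus)
print(Status.PENDING)        # AccessInvitationStatus.PENDING
print(Status.PENDING.value)  # 2, per the declaration above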
jonparrott/gcloud-python
spanner/tests/unit/test_streamed.py
3
40891
# Copyright 2016 Google LLC All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import mock class TestStreamedResultSet(unittest.TestCase): def _getTargetClass(self): from google.cloud.spanner_v1.streamed import StreamedResultSet return StreamedResultSet def _make_one(self, *args, **kwargs): return self._getTargetClass()(*args, **kwargs) def test_ctor_defaults(self): iterator = _MockCancellableIterator() streamed = self._make_one(iterator) self.assertIs(streamed._response_iterator, iterator) self.assertIsNone(streamed._source) self.assertEqual(list(streamed), []) self.assertIsNone(streamed.metadata) self.assertIsNone(streamed.stats) def test_ctor_w_source(self): iterator = _MockCancellableIterator() source = object() streamed = self._make_one(iterator, source=source) self.assertIs(streamed._response_iterator, iterator) self.assertIs(streamed._source, source) self.assertEqual(list(streamed), []) self.assertIsNone(streamed.metadata) self.assertIsNone(streamed.stats) def test_fields_unset(self): iterator = _MockCancellableIterator() streamed = self._make_one(iterator) with self.assertRaises(AttributeError): streamed.fields @staticmethod def _make_scalar_field(name, type_): from google.cloud.spanner_v1.proto.type_pb2 import StructType from google.cloud.spanner_v1.proto.type_pb2 import Type return StructType.Field(name=name, type=Type(code=type_)) @staticmethod def _make_array_field(name, element_type_code=None, element_type=None): from google.cloud.spanner_v1.proto.type_pb2 import StructType from google.cloud.spanner_v1.proto.type_pb2 import Type if element_type is None: element_type = Type(code=element_type_code) array_type = Type( code='ARRAY', array_element_type=element_type) return StructType.Field(name=name, type=array_type) @staticmethod def _make_struct_type(struct_type_fields): from google.cloud.spanner_v1.proto.type_pb2 import StructType from google.cloud.spanner_v1.proto.type_pb2 import Type fields = [ StructType.Field(name=key, type=Type(code=value)) for key, value in struct_type_fields ] struct_type = StructType(fields=fields) return Type(code='STRUCT', struct_type=struct_type) @staticmethod def _make_value(value): from google.cloud.spanner_v1._helpers import _make_value_pb return _make_value_pb(value) @staticmethod def _make_list_value(values=(), value_pbs=None): from google.protobuf.struct_pb2 import ListValue from google.protobuf.struct_pb2 import Value from google.cloud.spanner_v1._helpers import _make_list_value_pb if value_pbs is not None: return Value(list_value=ListValue(values=value_pbs)) return Value(list_value=_make_list_value_pb(values)) @staticmethod def _make_result_set_metadata(fields=(), transaction_id=None): from google.cloud.spanner_v1.proto.result_set_pb2 import ( ResultSetMetadata) metadata = ResultSetMetadata() for field in fields: metadata.row_type.fields.add().CopyFrom(field) if transaction_id is not None: metadata.transaction.id = transaction_id return metadata @staticmethod def _make_result_set_stats(query_plan=None, **kw): from 
google.cloud.spanner_v1.proto.result_set_pb2 import ( ResultSetStats) from google.protobuf.struct_pb2 import Struct from google.cloud.spanner_v1._helpers import _make_value_pb query_stats = Struct(fields={ key: _make_value_pb(value) for key, value in kw.items()}) return ResultSetStats( query_plan=query_plan, query_stats=query_stats, ) @staticmethod def _make_partial_result_set( values, metadata=None, stats=None, chunked_value=False): from google.cloud.spanner_v1.proto.result_set_pb2 import ( PartialResultSet) return PartialResultSet( values=values, metadata=metadata, stats=stats, chunked_value=chunked_value, ) def test_properties_set(self): iterator = _MockCancellableIterator() streamed = self._make_one(iterator) FIELDS = [ self._make_scalar_field('full_name', 'STRING'), self._make_scalar_field('age', 'INT64'), ] metadata = streamed._metadata = self._make_result_set_metadata(FIELDS) stats = streamed._stats = self._make_result_set_stats() self.assertEqual(list(streamed.fields), FIELDS) self.assertIs(streamed.metadata, metadata) self.assertIs(streamed.stats, stats) def test__merge_chunk_bool(self): from google.cloud.spanner_v1.streamed import Unmergeable iterator = _MockCancellableIterator() streamed = self._make_one(iterator) FIELDS = [ self._make_scalar_field('registered_voter', 'BOOL'), ] streamed._metadata = self._make_result_set_metadata(FIELDS) streamed._pending_chunk = self._make_value(True) chunk = self._make_value(False) with self.assertRaises(Unmergeable): streamed._merge_chunk(chunk) def test__merge_chunk_int64(self): iterator = _MockCancellableIterator() streamed = self._make_one(iterator) FIELDS = [ self._make_scalar_field('age', 'INT64'), ] streamed._metadata = self._make_result_set_metadata(FIELDS) streamed._pending_chunk = self._make_value(42) chunk = self._make_value(13) merged = streamed._merge_chunk(chunk) self.assertEqual(merged.string_value, '4213') self.assertIsNone(streamed._pending_chunk) def test__merge_chunk_float64_nan_string(self): iterator = _MockCancellableIterator() streamed = self._make_one(iterator) FIELDS = [ self._make_scalar_field('weight', 'FLOAT64'), ] streamed._metadata = self._make_result_set_metadata(FIELDS) streamed._pending_chunk = self._make_value(u'Na') chunk = self._make_value(u'N') merged = streamed._merge_chunk(chunk) self.assertEqual(merged.string_value, u'NaN') def test__merge_chunk_float64_w_empty(self): iterator = _MockCancellableIterator() streamed = self._make_one(iterator) FIELDS = [ self._make_scalar_field('weight', 'FLOAT64'), ] streamed._metadata = self._make_result_set_metadata(FIELDS) streamed._pending_chunk = self._make_value(3.14159) chunk = self._make_value('') merged = streamed._merge_chunk(chunk) self.assertEqual(merged.number_value, 3.14159) def test__merge_chunk_float64_w_float64(self): from google.cloud.spanner_v1.streamed import Unmergeable iterator = _MockCancellableIterator() streamed = self._make_one(iterator) FIELDS = [ self._make_scalar_field('weight', 'FLOAT64'), ] streamed._metadata = self._make_result_set_metadata(FIELDS) streamed._pending_chunk = self._make_value(3.14159) chunk = self._make_value(2.71828) with self.assertRaises(Unmergeable): streamed._merge_chunk(chunk) def test__merge_chunk_string(self): iterator = _MockCancellableIterator() streamed = self._make_one(iterator) FIELDS = [ self._make_scalar_field('name', 'STRING'), ] streamed._metadata = self._make_result_set_metadata(FIELDS) streamed._pending_chunk = self._make_value(u'phred') chunk = self._make_value(u'wylma') merged = streamed._merge_chunk(chunk) 
self.assertEqual(merged.string_value, u'phredwylma') self.assertIsNone(streamed._pending_chunk) def test__merge_chunk_string_w_bytes(self): iterator = _MockCancellableIterator() streamed = self._make_one(iterator) FIELDS = [ self._make_scalar_field('image', 'BYTES'), ] streamed._metadata = self._make_result_set_metadata(FIELDS) streamed._pending_chunk = self._make_value( u'iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAAAAAA' u'6fptVAAAACXBIWXMAAAsTAAALEwEAmpwYAAAA\n', ) chunk = self._make_value( u'B3RJTUUH4QQGFwsBTL3HMwAAABJpVFh0Q29tbWVudAAAAAAAU0FNUExF' u'MG3E+AAAAApJREFUCNdj\nYAAAAAIAAeIhvDMAAAAASUVORK5CYII=\n', ) merged = streamed._merge_chunk(chunk) self.assertEqual( merged.string_value, u'iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAAAAAA6fptVAAAACXBIWXMAAAsTAAAL' u'EwEAmpwYAAAA\nB3RJTUUH4QQGFwsBTL3HMwAAABJpVFh0Q29tbWVudAAAAAAAU0' u'FNUExFMG3E+AAAAApJREFUCNdj\nYAAAAAIAAeIhvDMAAAAASUVORK5CYII=\n', ) self.assertIsNone(streamed._pending_chunk) def test__merge_chunk_array_of_bool(self): iterator = _MockCancellableIterator() streamed = self._make_one(iterator) FIELDS = [ self._make_array_field('name', element_type_code='BOOL'), ] streamed._metadata = self._make_result_set_metadata(FIELDS) streamed._pending_chunk = self._make_list_value([True, True]) chunk = self._make_list_value([False, False, False]) merged = streamed._merge_chunk(chunk) expected = self._make_list_value([True, True, False, False, False]) self.assertEqual(merged, expected) self.assertIsNone(streamed._pending_chunk) def test__merge_chunk_array_of_int(self): iterator = _MockCancellableIterator() streamed = self._make_one(iterator) FIELDS = [ self._make_array_field('name', element_type_code='INT64'), ] streamed._metadata = self._make_result_set_metadata(FIELDS) streamed._pending_chunk = self._make_list_value([0, 1, 2]) chunk = self._make_list_value([3, 4, 5]) merged = streamed._merge_chunk(chunk) expected = self._make_list_value([0, 1, 23, 4, 5]) self.assertEqual(merged, expected) self.assertIsNone(streamed._pending_chunk) def test__merge_chunk_array_of_float(self): import math PI = math.pi EULER = math.e SQRT_2 = math.sqrt(2.0) LOG_10 = math.log(10) iterator = _MockCancellableIterator() streamed = self._make_one(iterator) FIELDS = [ self._make_array_field('name', element_type_code='FLOAT64'), ] streamed._metadata = self._make_result_set_metadata(FIELDS) streamed._pending_chunk = self._make_list_value([PI, SQRT_2]) chunk = self._make_list_value(['', EULER, LOG_10]) merged = streamed._merge_chunk(chunk) expected = self._make_list_value([PI, SQRT_2, EULER, LOG_10]) self.assertEqual(merged, expected) self.assertIsNone(streamed._pending_chunk) def test__merge_chunk_array_of_string_with_empty(self): iterator = _MockCancellableIterator() streamed = self._make_one(iterator) FIELDS = [ self._make_array_field('name', element_type_code='STRING'), ] streamed._metadata = self._make_result_set_metadata(FIELDS) streamed._pending_chunk = self._make_list_value([u'A', u'B', u'C']) chunk = self._make_list_value([]) merged = streamed._merge_chunk(chunk) expected = self._make_list_value([u'A', u'B', u'C']) self.assertEqual(merged, expected) self.assertIsNone(streamed._pending_chunk) def test__merge_chunk_array_of_string(self): iterator = _MockCancellableIterator() streamed = self._make_one(iterator) FIELDS = [ self._make_array_field('name', element_type_code='STRING'), ] streamed._metadata = self._make_result_set_metadata(FIELDS) streamed._pending_chunk = self._make_list_value([u'A', u'B', u'C']) chunk = self._make_list_value([None, u'D', u'E']) merged = 
streamed._merge_chunk(chunk) expected = self._make_list_value([u'A', u'B', u'C', None, u'D', u'E']) self.assertEqual(merged, expected) self.assertIsNone(streamed._pending_chunk) def test__merge_chunk_array_of_string_with_null(self): iterator = _MockCancellableIterator() streamed = self._make_one(iterator) FIELDS = [ self._make_array_field('name', element_type_code='STRING'), ] streamed._metadata = self._make_result_set_metadata(FIELDS) streamed._pending_chunk = self._make_list_value([u'A', u'B', u'C']) chunk = self._make_list_value([u'D', u'E']) merged = streamed._merge_chunk(chunk) expected = self._make_list_value([u'A', u'B', u'CD', u'E']) self.assertEqual(merged, expected) self.assertIsNone(streamed._pending_chunk) def test__merge_chunk_array_of_array_of_int(self): from google.cloud.spanner_v1.proto.type_pb2 import StructType from google.cloud.spanner_v1.proto.type_pb2 import Type subarray_type = Type( code='ARRAY', array_element_type=Type(code='INT64')) array_type = Type(code='ARRAY', array_element_type=subarray_type) iterator = _MockCancellableIterator() streamed = self._make_one(iterator) FIELDS = [ StructType.Field(name='loloi', type=array_type) ] streamed._metadata = self._make_result_set_metadata(FIELDS) streamed._pending_chunk = self._make_list_value(value_pbs=[ self._make_list_value([0, 1]), self._make_list_value([2]), ]) chunk = self._make_list_value(value_pbs=[ self._make_list_value([3]), self._make_list_value([4, 5]), ]) merged = streamed._merge_chunk(chunk) expected = self._make_list_value(value_pbs=[ self._make_list_value([0, 1]), self._make_list_value([23]), self._make_list_value([4, 5]), ]) self.assertEqual(merged, expected) self.assertIsNone(streamed._pending_chunk) def test__merge_chunk_array_of_array_of_string(self): from google.cloud.spanner_v1.proto.type_pb2 import StructType from google.cloud.spanner_v1.proto.type_pb2 import Type subarray_type = Type( code='ARRAY', array_element_type=Type(code='STRING')) array_type = Type(code='ARRAY', array_element_type=subarray_type) iterator = _MockCancellableIterator() streamed = self._make_one(iterator) FIELDS = [ StructType.Field(name='lolos', type=array_type) ] streamed._metadata = self._make_result_set_metadata(FIELDS) streamed._pending_chunk = self._make_list_value(value_pbs=[ self._make_list_value([u'A', u'B']), self._make_list_value([u'C']), ]) chunk = self._make_list_value(value_pbs=[ self._make_list_value([u'D']), self._make_list_value([u'E', u'F']), ]) merged = streamed._merge_chunk(chunk) expected = self._make_list_value(value_pbs=[ self._make_list_value([u'A', u'B']), self._make_list_value([u'CD']), self._make_list_value([u'E', u'F']), ]) self.assertEqual(merged, expected) self.assertIsNone(streamed._pending_chunk) def test__merge_chunk_array_of_struct(self): iterator = _MockCancellableIterator() streamed = self._make_one(iterator) struct_type = self._make_struct_type([ ('name', 'STRING'), ('age', 'INT64'), ]) FIELDS = [ self._make_array_field('test', element_type=struct_type), ] streamed._metadata = self._make_result_set_metadata(FIELDS) partial = self._make_list_value([u'Phred ']) streamed._pending_chunk = self._make_list_value(value_pbs=[partial]) rest = self._make_list_value([u'Phlyntstone', 31]) chunk = self._make_list_value(value_pbs=[rest]) merged = streamed._merge_chunk(chunk) struct = self._make_list_value([u'Phred Phlyntstone', 31]) expected = self._make_list_value(value_pbs=[struct]) self.assertEqual(merged, expected) self.assertIsNone(streamed._pending_chunk) def 
test__merge_chunk_array_of_struct_unmergeable(self): iterator = _MockCancellableIterator() streamed = self._make_one(iterator) struct_type = self._make_struct_type([ ('name', 'STRING'), ('registered', 'BOOL'), ('voted', 'BOOL'), ]) FIELDS = [ self._make_array_field('test', element_type=struct_type), ] streamed._metadata = self._make_result_set_metadata(FIELDS) partial = self._make_list_value([u'Phred Phlyntstone', True]) streamed._pending_chunk = self._make_list_value(value_pbs=[partial]) rest = self._make_list_value([True]) chunk = self._make_list_value(value_pbs=[rest]) merged = streamed._merge_chunk(chunk) struct = self._make_list_value([u'Phred Phlyntstone', True, True]) expected = self._make_list_value(value_pbs=[struct]) self.assertEqual(merged, expected) self.assertIsNone(streamed._pending_chunk) def test_merge_values_empty_and_empty(self): iterator = _MockCancellableIterator() streamed = self._make_one(iterator) FIELDS = [ self._make_scalar_field('full_name', 'STRING'), self._make_scalar_field('age', 'INT64'), self._make_scalar_field('married', 'BOOL'), ] streamed._metadata = self._make_result_set_metadata(FIELDS) streamed._current_row = [] streamed._merge_values([]) self.assertEqual(list(streamed), []) self.assertEqual(streamed._current_row, []) def test_merge_values_empty_and_partial(self): iterator = _MockCancellableIterator() streamed = self._make_one(iterator) FIELDS = [ self._make_scalar_field('full_name', 'STRING'), self._make_scalar_field('age', 'INT64'), self._make_scalar_field('married', 'BOOL'), ] streamed._metadata = self._make_result_set_metadata(FIELDS) BARE = [u'Phred Phlyntstone', 42] VALUES = [self._make_value(bare) for bare in BARE] streamed._current_row = [] streamed._merge_values(VALUES) self.assertEqual(list(streamed), []) self.assertEqual(streamed._current_row, BARE) def test_merge_values_empty_and_filled(self): iterator = _MockCancellableIterator() streamed = self._make_one(iterator) FIELDS = [ self._make_scalar_field('full_name', 'STRING'), self._make_scalar_field('age', 'INT64'), self._make_scalar_field('married', 'BOOL'), ] streamed._metadata = self._make_result_set_metadata(FIELDS) BARE = [u'Phred Phlyntstone', 42, True] VALUES = [self._make_value(bare) for bare in BARE] streamed._current_row = [] streamed._merge_values(VALUES) self.assertEqual(list(streamed), [BARE]) self.assertEqual(streamed._current_row, []) def test_merge_values_empty_and_filled_plus(self): iterator = _MockCancellableIterator() streamed = self._make_one(iterator) FIELDS = [ self._make_scalar_field('full_name', 'STRING'), self._make_scalar_field('age', 'INT64'), self._make_scalar_field('married', 'BOOL'), ] streamed._metadata = self._make_result_set_metadata(FIELDS) BARE = [ u'Phred Phlyntstone', 42, True, u'Bharney Rhubble', 39, True, u'Wylma Phlyntstone', ] VALUES = [self._make_value(bare) for bare in BARE] streamed._current_row = [] streamed._merge_values(VALUES) self.assertEqual(list(streamed), [BARE[0:3], BARE[3:6]]) self.assertEqual(streamed._current_row, BARE[6:]) def test_merge_values_partial_and_empty(self): iterator = _MockCancellableIterator() streamed = self._make_one(iterator) FIELDS = [ self._make_scalar_field('full_name', 'STRING'), self._make_scalar_field('age', 'INT64'), self._make_scalar_field('married', 'BOOL'), ] streamed._metadata = self._make_result_set_metadata(FIELDS) BEFORE = [ u'Phred Phlyntstone' ] streamed._current_row[:] = BEFORE streamed._merge_values([]) self.assertEqual(list(streamed), []) self.assertEqual(streamed._current_row, BEFORE) def 
test_merge_values_partial_and_partial(self): iterator = _MockCancellableIterator() streamed = self._make_one(iterator) FIELDS = [ self._make_scalar_field('full_name', 'STRING'), self._make_scalar_field('age', 'INT64'), self._make_scalar_field('married', 'BOOL'), ] streamed._metadata = self._make_result_set_metadata(FIELDS) BEFORE = [u'Phred Phlyntstone'] streamed._current_row[:] = BEFORE MERGED = [42] TO_MERGE = [self._make_value(item) for item in MERGED] streamed._merge_values(TO_MERGE) self.assertEqual(list(streamed), []) self.assertEqual(streamed._current_row, BEFORE + MERGED) def test_merge_values_partial_and_filled(self): iterator = _MockCancellableIterator() streamed = self._make_one(iterator) FIELDS = [ self._make_scalar_field('full_name', 'STRING'), self._make_scalar_field('age', 'INT64'), self._make_scalar_field('married', 'BOOL'), ] streamed._metadata = self._make_result_set_metadata(FIELDS) BEFORE = [ u'Phred Phlyntstone' ] streamed._current_row[:] = BEFORE MERGED = [42, True] TO_MERGE = [self._make_value(item) for item in MERGED] streamed._merge_values(TO_MERGE) self.assertEqual(list(streamed), [BEFORE + MERGED]) self.assertEqual(streamed._current_row, []) def test_merge_values_partial_and_filled_plus(self): iterator = _MockCancellableIterator() streamed = self._make_one(iterator) FIELDS = [ self._make_scalar_field('full_name', 'STRING'), self._make_scalar_field('age', 'INT64'), self._make_scalar_field('married', 'BOOL'), ] streamed._metadata = self._make_result_set_metadata(FIELDS) BEFORE = [ self._make_value(u'Phred Phlyntstone') ] streamed._current_row[:] = BEFORE MERGED = [ 42, True, u'Bharney Rhubble', 39, True, u'Wylma Phlyntstone', ] TO_MERGE = [self._make_value(item) for item in MERGED] VALUES = BEFORE + MERGED streamed._merge_values(TO_MERGE) self.assertEqual(list(streamed), [VALUES[0:3], VALUES[3:6]]) self.assertEqual(streamed._current_row, VALUES[6:]) def test_one_or_none_no_value(self): streamed = self._make_one(_MockCancellableIterator()) with mock.patch.object(streamed, '_consume_next') as consume_next: consume_next.side_effect = StopIteration self.assertIsNone(streamed.one_or_none()) def test_one_or_none_single_value(self): streamed = self._make_one(_MockCancellableIterator()) streamed._rows = ['foo'] with mock.patch.object(streamed, '_consume_next') as consume_next: consume_next.side_effect = StopIteration self.assertEqual(streamed.one_or_none(), 'foo') def test_one_or_none_multiple_values(self): streamed = self._make_one(_MockCancellableIterator()) streamed._rows = ['foo', 'bar'] with self.assertRaises(ValueError): streamed.one_or_none() def test_one_or_none_consumed_stream(self): streamed = self._make_one(_MockCancellableIterator()) streamed._metadata = object() with self.assertRaises(RuntimeError): streamed.one_or_none() def test_one_single_value(self): streamed = self._make_one(_MockCancellableIterator()) streamed._rows = ['foo'] with mock.patch.object(streamed, '_consume_next') as consume_next: consume_next.side_effect = StopIteration self.assertEqual(streamed.one(), 'foo') def test_one_no_value(self): from google.cloud import exceptions iterator = _MockCancellableIterator(['foo']) streamed = self._make_one(iterator) with mock.patch.object(streamed, '_consume_next') as consume_next: consume_next.side_effect = StopIteration with self.assertRaises(exceptions.NotFound): streamed.one() def test_consume_next_empty(self): iterator = _MockCancellableIterator() streamed = self._make_one(iterator) with self.assertRaises(StopIteration): streamed._consume_next() def 
test_consume_next_first_set_partial(self): TXN_ID = b'DEADBEEF' FIELDS = [ self._make_scalar_field('full_name', 'STRING'), self._make_scalar_field('age', 'INT64'), self._make_scalar_field('married', 'BOOL'), ] metadata = self._make_result_set_metadata( FIELDS, transaction_id=TXN_ID) BARE = [u'Phred Phlyntstone', 42] VALUES = [self._make_value(bare) for bare in BARE] result_set = self._make_partial_result_set(VALUES, metadata=metadata) iterator = _MockCancellableIterator(result_set) source = mock.Mock(_transaction_id=None, spec=['_transaction_id']) streamed = self._make_one(iterator, source=source) streamed._consume_next() self.assertEqual(list(streamed), []) self.assertEqual(streamed._current_row, BARE) self.assertEqual(streamed.metadata, metadata) self.assertEqual(source._transaction_id, TXN_ID) def test_consume_next_first_set_partial_existing_txn_id(self): TXN_ID = b'DEADBEEF' FIELDS = [ self._make_scalar_field('full_name', 'STRING'), self._make_scalar_field('age', 'INT64'), self._make_scalar_field('married', 'BOOL'), ] metadata = self._make_result_set_metadata( FIELDS, transaction_id=b'') BARE = [u'Phred Phlyntstone', 42] VALUES = [self._make_value(bare) for bare in BARE] result_set = self._make_partial_result_set(VALUES, metadata=metadata) iterator = _MockCancellableIterator(result_set) source = mock.Mock(_transaction_id=TXN_ID, spec=['_transaction_id']) streamed = self._make_one(iterator, source=source) streamed._consume_next() self.assertEqual(list(streamed), []) self.assertEqual(streamed._current_row, BARE) self.assertEqual(streamed.metadata, metadata) self.assertEqual(source._transaction_id, TXN_ID) def test_consume_next_w_partial_result(self): FIELDS = [ self._make_scalar_field('full_name', 'STRING'), self._make_scalar_field('age', 'INT64'), self._make_scalar_field('married', 'BOOL'), ] VALUES = [ self._make_value(u'Phred '), ] result_set = self._make_partial_result_set(VALUES, chunked_value=True) iterator = _MockCancellableIterator(result_set) streamed = self._make_one(iterator) streamed._metadata = self._make_result_set_metadata(FIELDS) streamed._consume_next() self.assertEqual(list(streamed), []) self.assertEqual(streamed._current_row, []) self.assertEqual(streamed._pending_chunk, VALUES[0]) def test_consume_next_w_pending_chunk(self): FIELDS = [ self._make_scalar_field('full_name', 'STRING'), self._make_scalar_field('age', 'INT64'), self._make_scalar_field('married', 'BOOL'), ] BARE = [ u'Phlyntstone', 42, True, u'Bharney Rhubble', 39, True, u'Wylma Phlyntstone', ] VALUES = [self._make_value(bare) for bare in BARE] result_set = self._make_partial_result_set(VALUES) iterator = _MockCancellableIterator(result_set) streamed = self._make_one(iterator) streamed._metadata = self._make_result_set_metadata(FIELDS) streamed._pending_chunk = self._make_value(u'Phred ') streamed._consume_next() self.assertEqual(list(streamed), [ [u'Phred Phlyntstone', BARE[1], BARE[2]], [BARE[3], BARE[4], BARE[5]], ]) self.assertEqual(streamed._current_row, [BARE[6]]) self.assertIsNone(streamed._pending_chunk) def test_consume_next_last_set(self): FIELDS = [ self._make_scalar_field('full_name', 'STRING'), self._make_scalar_field('age', 'INT64'), self._make_scalar_field('married', 'BOOL'), ] metadata = self._make_result_set_metadata(FIELDS) stats = self._make_result_set_stats( rows_returned="1", elapsed_time="1.23 secs", cpu_time="0.98 secs", ) BARE = [u'Phred Phlyntstone', 42, True] VALUES = [self._make_value(bare) for bare in BARE] result_set = self._make_partial_result_set(VALUES, stats=stats) iterator 
= _MockCancellableIterator(result_set) streamed = self._make_one(iterator) streamed._metadata = metadata streamed._consume_next() self.assertEqual(list(streamed), [BARE]) self.assertEqual(streamed._current_row, []) self.assertEqual(streamed._stats, stats) def test___iter___empty(self): iterator = _MockCancellableIterator() streamed = self._make_one(iterator) found = list(streamed) self.assertEqual(found, []) def test___iter___one_result_set_partial(self): FIELDS = [ self._make_scalar_field('full_name', 'STRING'), self._make_scalar_field('age', 'INT64'), self._make_scalar_field('married', 'BOOL'), ] metadata = self._make_result_set_metadata(FIELDS) BARE = [u'Phred Phlyntstone', 42] VALUES = [self._make_value(bare) for bare in BARE] result_set = self._make_partial_result_set(VALUES, metadata=metadata) iterator = _MockCancellableIterator(result_set) streamed = self._make_one(iterator) found = list(streamed) self.assertEqual(found, []) self.assertEqual(list(streamed), []) self.assertEqual(streamed._current_row, BARE) self.assertEqual(streamed.metadata, metadata) def test___iter___multiple_result_sets_filled(self): FIELDS = [ self._make_scalar_field('full_name', 'STRING'), self._make_scalar_field('age', 'INT64'), self._make_scalar_field('married', 'BOOL'), ] metadata = self._make_result_set_metadata(FIELDS) BARE = [ u'Phred Phlyntstone', 42, True, u'Bharney Rhubble', 39, True, u'Wylma Phlyntstone', 41, True, ] VALUES = [self._make_value(bare) for bare in BARE] result_set1 = self._make_partial_result_set( VALUES[:4], metadata=metadata) result_set2 = self._make_partial_result_set(VALUES[4:]) iterator = _MockCancellableIterator(result_set1, result_set2) streamed = self._make_one(iterator) found = list(streamed) self.assertEqual(found, [ [BARE[0], BARE[1], BARE[2]], [BARE[3], BARE[4], BARE[5]], [BARE[6], BARE[7], BARE[8]], ]) self.assertEqual(list(streamed), []) self.assertEqual(streamed._current_row, []) self.assertIsNone(streamed._pending_chunk) def test___iter___w_existing_rows_read(self): FIELDS = [ self._make_scalar_field('full_name', 'STRING'), self._make_scalar_field('age', 'INT64'), self._make_scalar_field('married', 'BOOL'), ] metadata = self._make_result_set_metadata(FIELDS) ALREADY = [ [u'Pebbylz Phlyntstone', 4, False], [u'Dino Rhubble', 4, False], ] BARE = [ u'Phred Phlyntstone', 42, True, u'Bharney Rhubble', 39, True, u'Wylma Phlyntstone', 41, True, ] VALUES = [self._make_value(bare) for bare in BARE] result_set1 = self._make_partial_result_set( VALUES[:4], metadata=metadata) result_set2 = self._make_partial_result_set(VALUES[4:]) iterator = _MockCancellableIterator(result_set1, result_set2) streamed = self._make_one(iterator) streamed._rows[:] = ALREADY found = list(streamed) self.assertEqual(found, ALREADY + [ [BARE[0], BARE[1], BARE[2]], [BARE[3], BARE[4], BARE[5]], [BARE[6], BARE[7], BARE[8]], ]) self.assertEqual(list(streamed), []) self.assertEqual(streamed._current_row, []) self.assertIsNone(streamed._pending_chunk) class _MockCancellableIterator(object): cancel_calls = 0 def __init__(self, *values): self.iter_values = iter(values) def next(self): return next(self.iter_values) def __next__(self): # pragma: NO COVER Py3k return self.next() class TestStreamedResultSet_JSON_acceptance_tests(unittest.TestCase): _json_tests = None def _getTargetClass(self): from google.cloud.spanner_v1.streamed import StreamedResultSet return StreamedResultSet def _make_one(self, *args, **kwargs): return self._getTargetClass()(*args, **kwargs) def _load_json_test(self, test_name): import os if 
self.__class__._json_tests is None: dirname = os.path.dirname(__file__) filename = os.path.join( dirname, 'streaming-read-acceptance-test.json') raw = _parse_streaming_read_acceptance_tests(filename) tests = self.__class__._json_tests = {} for (name, partial_result_sets, results) in raw: tests[name] = partial_result_sets, results return self.__class__._json_tests[test_name] # Non-error cases def _match_results(self, testcase_name, assert_equality=None): partial_result_sets, expected = self._load_json_test(testcase_name) iterator = _MockCancellableIterator(*partial_result_sets) partial = self._make_one(iterator) if assert_equality is not None: assert_equality(list(partial), expected) else: self.assertEqual(list(partial), expected) def test_basic(self): self._match_results('Basic Test') def test_string_chunking(self): self._match_results('String Chunking Test') def test_string_array_chunking(self): self._match_results('String Array Chunking Test') def test_string_array_chunking_with_nulls(self): self._match_results('String Array Chunking Test With Nulls') def test_string_array_chunking_with_empty_strings(self): self._match_results('String Array Chunking Test With Empty Strings') def test_string_array_chunking_with_one_large_string(self): self._match_results('String Array Chunking Test With One Large String') def test_int64_array_chunking(self): self._match_results('INT64 Array Chunking Test') def test_float64_array_chunking(self): import math def assert_float_equality(lhs, rhs): # NaN, +Inf, and -Inf can't be tested for equality if lhs is None: self.assertIsNone(rhs) elif math.isnan(lhs): self.assertTrue(math.isnan(rhs)) elif math.isinf(lhs): self.assertTrue(math.isinf(rhs)) # but +Inf and -Inf can be tested for magnitude self.assertTrue((lhs > 0) == (rhs > 0)) else: self.assertEqual(lhs, rhs) def assert_rows_equality(lhs, rhs): self.assertEqual(len(lhs), len(rhs)) for l_rows, r_rows in zip(lhs, rhs): self.assertEqual(len(l_rows), len(r_rows)) for l_row, r_row in zip(l_rows, r_rows): self.assertEqual(len(l_row), len(r_row)) for l_cell, r_cell in zip(l_row, r_row): assert_float_equality(l_cell, r_cell) self._match_results( 'FLOAT64 Array Chunking Test', assert_rows_equality) def test_struct_array_chunking(self): self._match_results('Struct Array Chunking Test') def test_nested_struct_array(self): self._match_results('Nested Struct Array Test') def test_nested_struct_array_chunking(self): self._match_results('Nested Struct Array Chunking Test') def test_struct_array_and_string_chunking(self): self._match_results('Struct Array And String Chunking Test') def test_multiple_row_single_chunk(self): self._match_results('Multiple Row Single Chunk') def test_multiple_row_multiple_chunks(self): self._match_results('Multiple Row Multiple Chunks') def test_multiple_row_chunks_non_chunks_interleaved(self): self._match_results('Multiple Row Chunks/Non Chunks Interleaved') def _generate_partial_result_sets(prs_text_pbs): from google.protobuf.json_format import Parse from google.cloud.spanner_v1.proto.result_set_pb2 import PartialResultSet partial_result_sets = [] for prs_text_pb in prs_text_pbs: prs = PartialResultSet() partial_result_sets.append(Parse(prs_text_pb, prs)) return partial_result_sets def _normalize_int_array(cell): normalized = [] for subcell in cell: if subcell is not None: subcell = int(subcell) normalized.append(subcell) return normalized def _normalize_float(cell): if cell == u'Infinity': return float('inf') if cell == u'-Infinity': return float('-inf') if cell == u'NaN': return 
float('nan') if cell is not None: return float(cell) def _normalize_results(rows_data, fields): """Helper for _parse_streaming_read_acceptance_tests""" from google.cloud.spanner_v1.proto import type_pb2 normalized = [] for row_data in rows_data: row = [] assert len(row_data) == len(fields) for cell, field in zip(row_data, fields): if field.type.code == type_pb2.INT64: cell = int(cell) if field.type.code == type_pb2.FLOAT64: cell = _normalize_float(cell) elif field.type.code == type_pb2.BYTES: cell = cell.encode('utf8') elif field.type.code == type_pb2.ARRAY: if field.type.array_element_type.code == type_pb2.INT64: cell = _normalize_int_array(cell) elif field.type.array_element_type.code == type_pb2.FLOAT64: cell = [_normalize_float(subcell) for subcell in cell] row.append(cell) normalized.append(row) return normalized def _parse_streaming_read_acceptance_tests(filename): """Parse acceptance tests from JSON See streaming-read-acceptance-test.json """ import json with open(filename) as json_file: test_json = json.load(json_file) for test in test_json['tests']: name = test['name'] partial_result_sets = _generate_partial_result_sets(test['chunks']) fields = partial_result_sets[0].metadata.row_type.fields result = _normalize_results(test['result']['value'], fields) yield name, partial_result_sets, result
apache-2.0
359,996,281,145,104,000
37.759242
79
0.60627
false
dougfelt/nototools
nototools/generate_fontdiff_input.py
2
1951
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Generates fontdiff HTML input with all glyphs shared by two fonts.

Usage: "python generate_input.py [font_path_a] [font_path_b] [specimen_path]".
Each glyph will be put on its own line in the output HTML.
"""

import sys

from fontTools import ttLib
from nototools import hb_input


def main(font_path_a, font_path_b, specimen_path):
    generator = hb_input.HbInputGenerator(ttLib.TTFont(font_path_a))
    inputs_a = generator.all_inputs(warn=True)
    generator = hb_input.HbInputGenerator(ttLib.TTFont(font_path_b))
    inputs_b = set(generator.all_inputs(warn=True))

    to_ignore = ('\00', '\02')
    to_replace = (('&', '&amp;'), ('<', '&lt;'), ('>', '&gt;'))
    out_lines = ['<html>']
    for features, text in [i for i in inputs_a if i in inputs_b]:
        if any(char in text for char in to_ignore):
            continue
        for old, new in to_replace:
            text = text.replace(old, new)
        style = ''
        if features:
            style = (' style="font-feature-settings: %s;"' %
                     ', '.join("'%s'" % f for f in features))
        out_lines.append('<p%s>%s</p>' % (style, text))
    out_lines.append('</html>')
    out_text = '\n'.join(out_lines)
    with open(specimen_path, 'w') as out_file:
        out_file.write(out_text.encode('utf-8'))


if __name__ == '__main__':
    main(*sys.argv[1:])
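# Illustration only (not part of the archived file): a minimal invocation sketch
# for the script above. The font and output paths are placeholder assumptions.
#
#     python generate_fontdiff_input.py before.ttf after.ttf specimen.html
#
# This writes one <p> element per HarfBuzz input shared by both fonts into
# specimen.html, which can then be fed to fontdiff as its specimen file.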
apache-2.0
-2,246,089,513,569,353,000
33.839286
78
0.64326
false
ssarangi/numba
numba/cuda/tests/cudadrv/test_reset_device.py
8
1184
from __future__ import print_function, absolute_import, division

import threading

from numba import cuda
from numba.cuda.cudadrv.driver import driver
from numba.cuda.testing import unittest, CUDATestCase

try:
    from Queue import Queue  # Python 2
except:
    from queue import Queue  # Python 3


class TestResetDevice(CUDATestCase):
    def test_reset_device(self):
        def newthread(exception_queue):
            try:
                devices = range(driver.get_device_count())
                for _ in range(2):
                    for d in devices:
                        cuda.select_device(d)
                        cuda.close()
            except Exception as e:
                exception_queue.put(e)

        # Do test on a separate thread so that we don't affect
        # the current context in the main thread.
        exception_queue = Queue()
        t = threading.Thread(target=newthread, args=(exception_queue,))
        t.start()
        t.join()

        exceptions = []
        while not exception_queue.empty():
            exceptions.append(exception_queue.get())
        self.assertEqual(exceptions, [])


if __name__ == '__main__':
    unittest.main()
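# Illustration only (not part of the numba source): because the module ends with
# unittest.main(), it can be run stand-alone on a CUDA-capable machine, e.g.
#
#     python test_reset_device.py
#
# The select_device()/close() loop is executed on a worker thread so that the
# main thread's CUDA context is left untouched by the repeated resets.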
bsd-2-clause
-888,023,770,386,663,700
27.878049
71
0.592905
false
tomka/CATMAID
django/applications/catmaid/control/point.py
2
6638
# -*- coding: utf-8 -*- from django.shortcuts import get_object_or_404 from django.utils.decorators import method_decorator from catmaid.control.authentication import requires_user_role, can_edit_or_fail from catmaid.models import Point, UserRole from catmaid.serializers import PointSerializer from rest_framework.request import Request from rest_framework.response import Response from rest_framework.views import APIView class PointList(APIView): @method_decorator(requires_user_role(UserRole.Browse)) def get(self, request:Request, project_id) -> Response: # XXX: Why is this an object method when everything else is class methods? """List points, optionally constrained by various properties. --- parameters: - name: project_id description: Project of points type: integer paramType: path required: true """ points = Point.objects.all() serializer = PointSerializer(points, many=True) return Response(serializer.data) @method_decorator(requires_user_role(UserRole.Annotate)) def put(request:Request, project_id) -> Response: """Add a new point. Expect at least the location as parameters. --- parameters: - name: project_id description: Project of points type: integer paramType: path required: true - name: location_x description: X coordinate type: float paramType: form required: true - name: location_y description: Y coordinate type: float paramType: form required: true - name: location_z description: Z coordinate type: float paramType: form required: true - name: radius description: Optional radius type: float paramType: form required: false - name: confidence description: Optional confidence in [0,5] type: integer paramType: form required: false """ location_x = float(request.POST.get('x')) location_y = float(request.POST.get('y')) location_z = float(request.POST.get('z')) radius = float(request.POST.get('radius', 0)) confidence = min(max(int(request.POST.get('confidence'), 0), 0), 5) point = Point.objects.create(project_id=project_id, user=request.user, editor=request.user, location_x=location_x, location_y=location_y, location_z=location_z, radius=radius, confidence=confidence) point.save() serializer = PointSerializer(point) return Response(serializer.data) class PointDetail(APIView): @method_decorator(requires_user_role(UserRole.Browse)) def get(request:Request, project_id, point_id) -> Response: """Return details on one particular point. --- parameters: - name: project_id description: Project point is part of type: integer paramType: path required: true - name: point_id description: ID of point type: integer paramType: path required: true """ point = get_object_or_404(Point, pk=point_id, project_id=project_id) serializer = PointSerializer(point) return Response(serializer.data) @method_decorator(requires_user_role(UserRole.Annotate)) def post(request:Request, project_id, point_id) -> Response: """Update one particular point. Requires at least one field to change. 
--- parameters: - name: project_id description: Project point is part of type: integer paramType: path required: true - name: point_id description: ID of point type: integer paramType: path required: true - name: location_x description: X coordinate type: float paramType: form required: false - name: location_y description: Y coordinate type: float paramType: form required: false - name: location_z description: Z coordinate type: float paramType: form required: false - name: radius description: Optional radius type: float paramType: form required: false - name: confidence description: Optional confidence in [0,5] type: integer paramType: form required: false """ can_edit_or_fail(request.user, point_id, 'point') updated_fields = {} if request.POST.has('x'): updated_fields['location_x'] = float(request.POST.get('x')) if request.POST.has('y'): updated_fields['location_y'] = float(request.POST.get('y')) if request.POST.has('z'): updated_fields['location_z'] = float(request.POST.get('z')) if request.POST.has('radius'): updated_fields['radius'] = float(request.POST.get('radius')) if request.POST.has('confidence'): confidence = max(min(int(request.POST.get('confidence')), 5), 0) updated_fields['confidence'] = confidence if not updated_fields: raise ValueError('No field to modify provided') point = get_object_or_404(Point, pk=point_id, project_id=project_id) point.update(**updated_fields) point.save() serializer = PointSerializer(point) return Response(serializer.data) @method_decorator(requires_user_role(UserRole.Annotate)) def delete(request:Request, project_id, point_id) -> Response: """Delete one particular point. --- parameters: - name: project_id description: Project point is part of type: integer paramType: path required: true - name: point_id description: ID of point type: integer paramType: path required: true """ can_edit_or_fail(request.user, point_id, 'point') point = get_object_or_404(Point, pk=point_id, project_id=project_id) point.delete() point.id = None serializer = PointSerializer(point) return Response(serializer.data)
gpl-3.0
-7,236,466,781,905,183,000
32.867347
134
0.576077
false
martong/python-mode
pymode/libs/pylama/config.py
13
6016
""" Parse arguments from command line and configuration files. """ import fnmatch import sys import os from re import compile as re import logging from argparse import ArgumentParser from . import __version__ from .libs.inirama import Namespace from .lint.extensions import LINTERS # Setup a logger LOGGER = logging.getLogger('pylama') LOGGER.propagate = False STREAM = logging.StreamHandler(sys.stdout) LOGGER.addHandler(STREAM) #: A default checkers DEFAULT_LINTERS = 'pep8', 'pyflakes', 'mccabe' CURDIR = os.getcwd() CONFIG_FILES = [ os.path.join(CURDIR, basename) for basename in ('pylama.ini', 'setup.cfg', 'tox.ini', 'pytest.ini') ] class _Default(object): def __init__(self, value=None): self.value = value def __str__(self): return str(self.value) def __repr__(self): return "<_Default [%s]>" % self.value def split_csp_str(s): """ Split commaseparated string. :returns: list of splitted values """ if isinstance(s, (list, tuple)): return s return list(set(i for i in s.strip().split(',') if i)) def parse_linters(linters): """ Initialize choosen linters. :returns: list of inited linters """ result = list() for name in split_csp_str(linters): linter = LINTERS.get(name) if linter: result.append((name, linter)) else: logging.warn("Linter `%s` not found.", name) return result PARSER = ArgumentParser(description="Code audit tool for python.") PARSER.add_argument( "path", nargs='?', default=_Default(CURDIR), help="Path on file or directory for code check.") PARSER.add_argument( "--verbose", "-v", action='store_true', help="Verbose mode.") PARSER.add_argument('--version', action='version', version='%(prog)s ' + __version__) PARSER.add_argument( "--format", "-f", default=_Default('pep8'), choices=['pep8', 'pylint'], help="Choose errors format (pep8, pylint).") PARSER.add_argument( "--select", "-s", default=_Default(''), type=split_csp_str, help="Select errors and warnings. (comma-separated list)") PARSER.add_argument( "--linters", "-l", default=_Default(','.join(DEFAULT_LINTERS)), type=parse_linters, help=( "Select linters. (comma-separated). Choices are %s." % ','.join(s for s in LINTERS.keys()) )) PARSER.add_argument( "--ignore", "-i", default=_Default(''), type=split_csp_str, help="Ignore errors and warnings. (comma-separated)") PARSER.add_argument( "--skip", default=_Default(''), type=lambda s: [re(fnmatch.translate(p)) for p in s.split(',') if p], help="Skip files by masks (comma-separated, Ex. */messages.py)") PARSER.add_argument("--report", "-r", help="Send report to file [REPORT]") PARSER.add_argument( "--hook", action="store_true", help="Install Git (Mercurial) hook.") PARSER.add_argument( "--async", action="store_true", help="Enable async mode. Usefull for checking a lot of files. " "Dont supported with pylint.") PARSER.add_argument( "--options", "-o", default="", help="Select configuration file. By default is '<CURDIR>/pylama.ini'") PARSER.add_argument( "--force", "-F", action='store_true', default=_Default(False), help="Force code checking (if linter doesnt allow)") ACTIONS = dict((a.dest, a) for a in PARSER._actions) def parse_options(args=None, config=True, **overrides): # noqa """ Parse options from command line and configuration files. 
:return argparse.Namespace: """ if args is None: args = [] # Parse args from command string options = PARSER.parse_args(args) options.file_params = dict() options.linters_params = dict() # Override options for k, v in overrides.items(): passed_value = getattr(options, k, _Default()) if isinstance(passed_value, _Default): setattr(options, k, _Default(v)) # Compile options from ini if config: cfg = get_config(str(options.options)) for k, v in cfg.default.items(): LOGGER.info('Find option %s (%s)', k, v) passed_value = getattr(options, k, _Default()) if isinstance(passed_value, _Default): setattr(options, k, _Default(v)) # Parse file related options for name, opts in cfg.sections.items(): if not name.startswith('pylama'): continue if name == cfg.default_section: continue name = name[7:] if name in LINTERS: options.linters_params[name] = dict(opts) continue mask = re(fnmatch.translate(name)) options.file_params[mask] = dict(opts) # Postprocess options opts = dict(options.__dict__.items()) for name, value in opts.items(): if isinstance(value, _Default): setattr(options, name, process_value(name, value.value)) return options def process_value(name, value): """ Compile option value. """ action = ACTIONS.get(name) if not action: return value if callable(action.type): return action.type(value) if action.const: return bool(int(value)) return value def get_config(ini_path=None): """ Load configuration from INI. :return Namespace: """ config = Namespace() config.default_section = 'pylama' if not ini_path: for path in CONFIG_FILES: if os.path.isfile(path) and os.access(path, os.R_OK): config.read(path) else: config.read(ini_path) return config def setup_logger(options): """ Setup logger with options. """ LOGGER.setLevel(logging.INFO if options.verbose else logging.WARN) if options.report: LOGGER.removeHandler(STREAM) LOGGER.addHandler(logging.FileHandler(options.report, mode='w')) LOGGER.info('Try to read configuration from: ' + options.options) # pylama:ignore=W0212
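# Hypothetical usage sketch (not taken from the pylama source; the linter list
# and the 'src' path below are assumptions) showing how the helpers defined
# above fit together when driving pylama programmatically:
#
#     from pylama.config import parse_options, setup_logger
#     options = parse_options(['--linters', 'pep8,mccabe', '--verbose', 'src'])
#     setup_logger(options)
#     # options.linters is now a list of (name, linter) pairs drawn from LINTERS,
#     # and options.path is 'src'; values not given fall back to the INI config.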
lgpl-3.0
-2,540,613,288,940,328,400
25.737778
75
0.620678
false
numerigraphe/odoomrp-wip
mrp_project_link/models/project_project.py
16
2484
# -*- encoding: utf-8 -*-
##############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################

from openerp import models, fields, api


class ProjectProject(models.Model):
    _inherit = 'project.project'

    @api.one
    def _project_shortcut_count(self):
        production_obj = self.env['mrp.production']
        procurement_obj = self.env['procurement.order']
        productions = production_obj.search([('project_id', '=', self.id)])
        procurements = procurement_obj.search([('main_project_id', '=',
                                                self.id)])
        self.production_count = len(productions)
        self.procurement_count = len(procurements)

    production_count = fields.Integer(string='Manufacturing Count',
                                      compute=_project_shortcut_count)
    procurement_count = fields.Integer(string='Procurement Count',
                                       compute=_project_shortcut_count)


class Task(models.Model):
    _inherit = "project.task"

    mrp_production_id = fields.Many2one('mrp.production',
                                        string='Manufacturing Order')
    mrp_sch_products = fields.One2many(
        "mrp.production.product.line", 'task_id',
        related='mrp_production_id.product_lines', store=False,
        string='Scheduled Products')
    wk_sch_products = fields.One2many(
        "mrp.production.product.line", 'task_id',
        related='wk_order.product_line', store=False,
        string='Scheduled Products')
    final_product = fields.Many2one('product.product',
                                    string='Product to Produce',
                                    store=False,
                                    related='mrp_production_id.product_id')
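# Illustration only (not part of the module): with the model above loaded, the
# computed counters can be read from any project record. The record id used
# here is a placeholder assumption.
#
#     project = env['project.project'].browse(1)
#     project.production_count   # mrp.production records linked via project_id
#     project.procurement_count  # procurement.order records via main_project_id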
agpl-3.0
7,232,300,517,557,215,000
42.578947
78
0.586957
false
Aaronneyer/kraller
kraller/kraller.py
3
8373
#!/usr/bin/python # -*- coding: utf-8 -*- """ Kraller An application to allow signups for accounts on a server with a key. """ from functools import wraps import logging from logging.handlers import SMTPHandler import os import re from urllib import urlencode from flask import Flask, abort, render_template, redirect, request, session, url_for, flash, send_from_directory from flask.ext.wtf import Form from wtforms import BooleanField, TextField, TextAreaField from wtforms.validators import Required import requests from itsdangerous_session import ItsDangerousSessionInterface from user_management import create_user, add_ssh_key, try_getpwnam app = Flask(__name__) app.config.from_envvar('KRALLER_SETTINGS') app.session_interface = ItsDangerousSessionInterface() if not app.debug: mail_handler = SMTPHandler('127.0.0.1', '[email protected]', ['root'], 'kraller log') mail_handler.setLevel(logging.WARNING) app.logger.addHandler(mail_handler) username_re = "^[a-z]{3}[0-9]*$" gecos_re = "^[A-Za-z0-9.' ()+-]*$" ssh_key_re = "^[A-Za-z0-9@: .\/=+-]*$" def my_cas_endpoint(redirect_to=None): """returns the URL that should be passed to the CAS server under the 'service' parameter. It's where the CAS server should redirect the user after it has done its job""" if redirect_to is None: redirect_to = request.path return url_for('login', redirect_to=redirect_to, _external=True) def cas_login_url(redirect_to=None): """returns a URL for the CAS server to send the user to. Once done, the CAS server will send the user back to redirect_to.""" return app.config['CAS_SERVER_ENDPOINT'] + 'login?' + urlencode(dict(service=my_cas_endpoint(redirect_to), renew='true')) def logged_in_url(url): if 'username' in session: return url else: return cas_login_url(url) def requires_auth(f): """decorates a url handler, requiring that the user be authenticated before seeing it.""" @wraps(f) def decorated(*args, **kwargs): if 'username' in session: return f(*args, **kwargs) else: return redirect(cas_login_url()) return decorated def in_blacklist(name): return name in map(lambda x: x.strip(), open(app.config['BLACKLIST_FILE']).readlines()) @app.route('/login') def login(): if not 'ticket' in request.args and 'redirect_to' in request.args: return abort(401) ticket = request.args.get('ticket') redirect_to = request.args.get('redirect_to') r = requests.get(app.config['CAS_SERVER_ENDPOINT'] + 'validate', params=dict(service=my_cas_endpoint(redirect_to), ticket=ticket), verify=True) if not r.status_code == requests.codes.ok: app.logger.warning('Got bad response code from CAS validate endpoint') return abort(500) response_lines = r.text.splitlines() if len(response_lines) != 2: app.logger.warning('Got malformed response from CAS validate endpoint') return abort(500) (answer, username) = response_lines if answer == 'yes': # set cookie and redirect session['username'] = username return redirect(redirect_to) else: app.logger.warning('CAS repudiated a ticket we got from the user') abort(401) @app.route('/logout') def logout(): session.pop('username', None) return redirect('/') @app.route('/') def index(): user = ('username' in session) and not try_getpwnam(session['username']) return render_template('index.tmpl', signup_url=logged_in_url('/signup'),user=user) class SignupForm(Form): name = TextField('Full Name', [Required()]) phone = TextField('Phone Number (optional)', []) ssh_key = TextAreaField('SSH Key', [Required()]) accept_tos = BooleanField(None, [Required()]) class AddKeyForm(Form): ssh_key = TextAreaField('SSH Key', [Required()]) 
@app.route('/signup', methods=['GET', 'POST']) @requires_auth def signup(): if request.method == 'GET': if 'username' in session: username = session['username'] # the user is logged in if not try_getpwnam(username): # the user doesn't yet have an account form = SignupForm() return render_template('signup.tmpl', form=form) else: return redirect('/add_key') username = session['username'] if try_getpwnam(username): flash('You are already registered.') return render_template('success.tmpl') form = SignupForm() if not form.validate_on_submit(): flash('There was an error submitting the form!') return render_template('signup.tmpl', form=form) name = form.name.data.strip() phone = form.phone.data.strip() ssh_key = form.ssh_key.data.strip() # before proceeding, check that all fields are sane valid = { 'username': re.match(username_re, username), 'name' : re.match(gecos_re, name), 'phone' : re.match(gecos_re, phone), 'ssh_key': re.match(ssh_key_re, ssh_key) } if not all(valid.values()): if not valid['username']: flash("I don't like the look of your username.") app.logger.warning('Username failed validation. Why is this happening?') if not valid['name']: flash("I prefer names consisting only of alphanumerics, apostrophes, and periods.") if not valid['phone']: flash("Your phone number looks weird to me. Try sticking to the basics.") if not valid['ssh_key']: flash("Are you sure that's an SSH key? Please check the entry and dial again.") return render_template('signup.tmpl', form=form) if in_blacklist(username): flash('You are blacklisted.') app.logger.warning('Blacklisted user attempted to sign up') return render_template('signup.tmpl', form=form) if create_user(username, name, '', '', phone): flash('There was an error creating a user account for you.') app.logger.warning('Error creating user account') return render_template('signup.tmpl', form=form) if add_ssh_key(username, ssh_key): app.logger.warning('Error adding ssh key') flash('Something went wrong when adding your ssh key.') return render_template('signup.tmpl', form=form) # Success! return render_template('success.tmpl') @app.route('/add_key', methods=['POST','GET']) @requires_auth def add_key(): if request.method == 'GET': if 'username' in session: username = session['username'] # the user is logged in if not try_getpwnam(username): return redirect('/signup') else: form = AddKeyForm() return render_template('add_key.tmpl', form=form) username = session['username'] form = AddKeyForm() if not form.validate_on_submit(): flash('There was an error submitting the form!') return render_template('add_key.tmpl', form=form) ssh_key = form.ssh_key.data.strip() # before proceeding, check that all fields are sane valid = { 'username': re.match(username_re, username), 'ssh_key': re.match(ssh_key_re, ssh_key) } if not all(valid.values()): if not valid['username']: flash("I don't like the look of your username.") app.logger.warning('Username failed validation. Why is this happening?') if not valid['ssh_key']: flash("Are you sure that's an SSH key? Please check the entry and dial again.") return render_template('add_key.tmpl', form=form) if add_ssh_key(username, ssh_key): app.logger.warning('Error adding ssh key') flash('Something went wrong when adding your ssh key.') return render_template('add_key.tmpl', form=form) # Success! 
return render_template('add_key_success.tmpl') @app.route('/favicon.ico') def favicon(): return send_from_directory(os.path.join(app.root_path, 'static'), 'favicon.ico', mimetype='image/vnd.microsoft.icon') """ Don't invoke this directly in production. This is for development only. Use a WSGI webserver such as Gunicorn to serve this in production. """ if __name__ == '__main__': app.run(debug=True)
bsd-2-clause
8,322,778,296,389,412,000
31.964567
147
0.639436
false
vivekdhayaal/keystone
keystone/token/persistence/backends/sql.py
6
11002
# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import functools from oslo_config import cfg from oslo_log import log from oslo_utils import timeutils from keystone.common import sql from keystone import exception from keystone.i18n import _LI from keystone import token from keystone.token import provider CONF = cfg.CONF LOG = log.getLogger(__name__) class TokenModel(sql.ModelBase, sql.DictBase): __tablename__ = 'token' attributes = ['id', 'expires', 'user_id', 'trust_id'] id = sql.Column(sql.String(64), primary_key=True) expires = sql.Column(sql.DateTime(), default=None) extra = sql.Column(sql.JsonBlob()) valid = sql.Column(sql.Boolean(), default=True, nullable=False) user_id = sql.Column(sql.String(64)) trust_id = sql.Column(sql.String(64)) __table_args__ = ( sql.Index('ix_token_expires', 'expires'), sql.Index('ix_token_expires_valid', 'expires', 'valid'), sql.Index('ix_token_user_id', 'user_id'), sql.Index('ix_token_trust_id', 'trust_id') ) def _expiry_range_batched(session, upper_bound_func, batch_size): """Returns the stop point of the next batch for expiration. Return the timestamp of the next token that is `batch_size` rows from being the oldest expired token. """ # This expiry strategy splits the tokens into roughly equal sized batches # to be deleted. It does this by finding the timestamp of a token # `batch_size` rows from the oldest token and yielding that to the caller. # It's expected that the caller will then delete all rows with a timestamp # equal to or older than the one yielded. This may delete slightly more # tokens than the batch_size, but that should be ok in almost all cases. 
LOG.debug('Token expiration batch size: %d', batch_size) query = session.query(TokenModel.expires) query = query.filter(TokenModel.expires < upper_bound_func()) query = query.order_by(TokenModel.expires) query = query.offset(batch_size - 1) query = query.limit(1) while True: try: next_expiration = query.one()[0] except sql.NotFound: # There are less than `batch_size` rows remaining, so fall # through to the normal delete break yield next_expiration yield upper_bound_func() def _expiry_range_all(session, upper_bound_func): """Expires all tokens in one pass.""" yield upper_bound_func() class Token(token.persistence.Driver): # Public interface def get_token(self, token_id): if token_id is None: raise exception.TokenNotFound(token_id=token_id) session = sql.get_session() token_ref = session.query(TokenModel).get(token_id) if not token_ref or not token_ref.valid: raise exception.TokenNotFound(token_id=token_id) return token_ref.to_dict() def create_token(self, token_id, data): data_copy = copy.deepcopy(data) if not data_copy.get('expires'): data_copy['expires'] = provider.default_expire_time() if not data_copy.get('user_id'): data_copy['user_id'] = data_copy['user']['id'] token_ref = TokenModel.from_dict(data_copy) token_ref.valid = True session = sql.get_session() with session.begin(): session.add(token_ref) return token_ref.to_dict() def delete_token(self, token_id): session = sql.get_session() with session.begin(): token_ref = session.query(TokenModel).get(token_id) if not token_ref or not token_ref.valid: raise exception.TokenNotFound(token_id=token_id) token_ref.valid = False def delete_tokens(self, user_id, tenant_id=None, trust_id=None, consumer_id=None): """Deletes all tokens in one session The user_id will be ignored if the trust_id is specified. user_id will always be specified. If using a trust, the token's user_id is set to the trustee's user ID or the trustor's user ID, so will use trust_id to query the tokens. 
""" session = sql.get_session() token_list = [] with session.begin(): now = timeutils.utcnow() query = session.query(TokenModel) query = query.filter_by(valid=True) query = query.filter(TokenModel.expires > now) if trust_id: query = query.filter(TokenModel.trust_id == trust_id) else: query = query.filter(TokenModel.user_id == user_id) for token_ref in query.all(): if tenant_id: token_ref_dict = token_ref.to_dict() if not self._tenant_matches(tenant_id, token_ref_dict): continue if consumer_id: token_ref_dict = token_ref.to_dict() if not self._consumer_matches(consumer_id, token_ref_dict): continue token_ref.valid = False token_list.append(token_ref.id) return token_list def _tenant_matches(self, tenant_id, token_ref_dict): return ((tenant_id is None) or (token_ref_dict.get('tenant') and token_ref_dict['tenant'].get('id') == tenant_id)) def _consumer_matches(self, consumer_id, ref): if consumer_id is None: return True else: try: oauth = ref['token_data']['token'].get('OS-OAUTH1', {}) return oauth and oauth['consumer_id'] == consumer_id except KeyError: return False def _list_tokens_for_trust(self, trust_id): session = sql.get_session() tokens = [] now = timeutils.utcnow() query = session.query(TokenModel) query = query.filter(TokenModel.expires > now) query = query.filter(TokenModel.trust_id == trust_id) token_references = query.filter_by(valid=True) for token_ref in token_references: token_ref_dict = token_ref.to_dict() tokens.append(token_ref_dict['id']) return tokens def _list_tokens_for_user(self, user_id, tenant_id=None): session = sql.get_session() tokens = [] now = timeutils.utcnow() query = session.query(TokenModel) query = query.filter(TokenModel.expires > now) query = query.filter(TokenModel.user_id == user_id) token_references = query.filter_by(valid=True) for token_ref in token_references: token_ref_dict = token_ref.to_dict() if self._tenant_matches(tenant_id, token_ref_dict): tokens.append(token_ref['id']) return tokens def _list_tokens_for_consumer(self, user_id, consumer_id): tokens = [] session = sql.get_session() with session.begin(): now = timeutils.utcnow() query = session.query(TokenModel) query = query.filter(TokenModel.expires > now) query = query.filter(TokenModel.user_id == user_id) token_references = query.filter_by(valid=True) for token_ref in token_references: token_ref_dict = token_ref.to_dict() if self._consumer_matches(consumer_id, token_ref_dict): tokens.append(token_ref_dict['id']) return tokens def _list_tokens(self, user_id, tenant_id=None, trust_id=None, consumer_id=None): if not CONF.token.revoke_by_id: return [] if trust_id: return self._list_tokens_for_trust(trust_id) if consumer_id: return self._list_tokens_for_consumer(user_id, consumer_id) else: return self._list_tokens_for_user(user_id, tenant_id) def list_revoked_tokens(self): session = sql.get_session() tokens = [] now = timeutils.utcnow() query = session.query(TokenModel.id, TokenModel.expires) query = query.filter(TokenModel.expires > now) token_references = query.filter_by(valid=False) for token_ref in token_references: record = { 'id': token_ref[0], 'expires': token_ref[1], } tokens.append(record) return tokens def _expiry_range_strategy(self, dialect): """Choose a token range expiration strategy Based on the DB dialect, select an expiry range callable that is appropriate. """ # DB2 and MySQL can both benefit from a batched strategy. On DB2 the # transaction log can fill up and on MySQL w/Galera, large # transactions can exceed the maximum write set size. 
if dialect == 'ibm_db_sa': # Limit of 100 is known to not fill a transaction log # of default maximum size while not significantly # impacting the performance of large token purges on # systems where the maximum transaction log size has # been increased beyond the default. return functools.partial(_expiry_range_batched, batch_size=100) elif dialect == 'mysql': # We want somewhat more than 100, since Galera replication delay is # at least RTT*2. This can be a significant amount of time if # doing replication across a WAN. return functools.partial(_expiry_range_batched, batch_size=1000) return _expiry_range_all def flush_expired_tokens(self): session = sql.get_session() dialect = session.bind.dialect.name expiry_range_func = self._expiry_range_strategy(dialect) query = session.query(TokenModel.expires) total_removed = 0 upper_bound_func = timeutils.utcnow for expiry_time in expiry_range_func(session, upper_bound_func): delete_query = query.filter(TokenModel.expires <= expiry_time) row_count = delete_query.delete(synchronize_session=False) total_removed += row_count LOG.debug('Removed %d total expired tokens', total_removed) session.flush() LOG.info(_LI('Total expired tokens removed: %d'), total_removed)
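# Illustration only (not part of the Keystone source): the batching idea used by
# _expiry_range_batched above, shown on a plain list of expiry timestamps instead
# of a SQLAlchemy query. The function name and batch size are assumptions.
def _batched_cutoffs_sketch(expiries, now, batch_size):
    """Yield cutoff timestamps so each delete pass removes ~batch_size rows."""
    expired = sorted(t for t in expiries if t < now)
    # Every batch_size-th expired timestamp becomes the upper bound of one pass.
    for index in range(batch_size - 1, len(expired), batch_size):
        yield expired[index]
    # A final pass bounded by `now` sweeps up whatever remains.
    yield now

# Example: 7 expired tokens (timestamps 0..6) with batch_size=3 give three passes:
# list(_batched_cutoffs_sketch(range(7), now=7, batch_size=3)) == [2, 5, 7]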
apache-2.0
5,088,823,085,838,505,000
37.876325
79
0.605799
false
norov/glibc
scripts/build-many-glibcs.py
1
70695
#!/usr/bin/python3 # Build many configurations of glibc. # Copyright (C) 2016-2017 Free Software Foundation, Inc. # This file is part of the GNU C Library. # # The GNU C Library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # The GNU C Library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with the GNU C Library; if not, see # <http://www.gnu.org/licenses/>. """Build many configurations of glibc. This script takes as arguments a directory name (containing a src subdirectory with sources of the relevant toolchain components) and a description of what to do: 'checkout', to check out sources into that directory, 'bot-cycle', to run a series of checkout and build steps, 'bot', to run 'bot-cycle' repeatedly, 'host-libraries', to build libraries required by the toolchain, 'compilers', to build cross-compilers for various configurations, or 'glibcs', to build glibc for various configurations and run the compilation parts of the testsuite. Subsequent arguments name the versions of components to check out (<component>-<version), for 'checkout', or, for actions other than 'checkout' and 'bot-cycle', name configurations for which compilers or glibc are to be built. """ import argparse import datetime import email.mime.text import email.utils import json import os import re import shutil import smtplib import stat import subprocess import sys import time import urllib.request try: os.cpu_count except: import multiprocessing os.cpu_count = lambda: multiprocessing.cpu_count() try: re.fullmatch except: re.fullmatch = lambda p,s,f=0: re.match(p+"\\Z",s,f) try: subprocess.run except: class _CompletedProcess: def __init__(self, args, returncode, stdout=None, stderr=None): self.args = args self.returncode = returncode self.stdout = stdout self.stderr = stderr def _run(*popenargs, input=None, timeout=None, check=False, **kwargs): assert(timeout is None) with subprocess.Popen(*popenargs, **kwargs) as process: try: stdout, stderr = process.communicate(input) except: process.kill() process.wait() raise returncode = process.poll() if check and returncode: raise subprocess.CalledProcessError(returncode, popenargs) return _CompletedProcess(popenargs, returncode, stdout, stderr) subprocess.run = _run class Context(object): """The global state associated with builds in a given directory.""" def __init__(self, topdir, parallelism, keep, replace_sources, strip, action): """Initialize the context.""" self.topdir = topdir self.parallelism = parallelism self.keep = keep self.replace_sources = replace_sources self.strip = strip self.srcdir = os.path.join(topdir, 'src') self.versions_json = os.path.join(self.srcdir, 'versions.json') self.build_state_json = os.path.join(topdir, 'build-state.json') self.bot_config_json = os.path.join(topdir, 'bot-config.json') self.installdir = os.path.join(topdir, 'install') self.host_libraries_installdir = os.path.join(self.installdir, 'host-libraries') self.builddir = os.path.join(topdir, 'build') self.logsdir = os.path.join(topdir, 'logs') self.logsdir_old = os.path.join(topdir, 'logs-old') self.makefile = 
os.path.join(self.builddir, 'Makefile') self.wrapper = os.path.join(self.builddir, 'wrapper') self.save_logs = os.path.join(self.builddir, 'save-logs') self.script_text = self.get_script_text() if action != 'checkout': self.build_triplet = self.get_build_triplet() self.glibc_version = self.get_glibc_version() self.configs = {} self.glibc_configs = {} self.makefile_pieces = ['.PHONY: all\n'] self.add_all_configs() self.load_versions_json() self.load_build_state_json() self.status_log_list = [] self.email_warning = False def get_script_text(self): """Return the text of this script.""" with open(sys.argv[0], 'r') as f: return f.read() def exec_self(self): """Re-execute this script with the same arguments.""" sys.stdout.flush() os.execv(sys.executable, [sys.executable] + sys.argv) def get_build_triplet(self): """Determine the build triplet with config.guess.""" config_guess = os.path.join(self.component_srcdir('gcc'), 'config.guess') cg_out = subprocess.run([config_guess], stdout=subprocess.PIPE, check=True, universal_newlines=True).stdout return cg_out.rstrip() def get_glibc_version(self): """Determine the glibc version number (major.minor).""" version_h = os.path.join(self.component_srcdir('glibc'), 'version.h') with open(version_h, 'r') as f: lines = f.readlines() starttext = '#define VERSION "' for l in lines: if l.startswith(starttext): l = l[len(starttext):] l = l.rstrip('"\n') m = re.fullmatch('([0-9]+)\.([0-9]+)[.0-9]*', l) return '%s.%s' % m.group(1, 2) print('error: could not determine glibc version') exit(1) def add_all_configs(self): """Add all known glibc build configurations.""" # On architectures missing __builtin_trap support, these # options may be needed as a workaround; see # <https://gcc.gnu.org/bugzilla/show_bug.cgi?id=70216> for SH. 
no_isolate = ('-fno-isolate-erroneous-paths-dereference' ' -fno-isolate-erroneous-paths-attribute') self.add_config(arch='aarch64', os_name='linux-gnu') self.add_config(arch='aarch64_be', os_name='linux-gnu') self.add_config(arch='alpha', os_name='linux-gnu') self.add_config(arch='arm', os_name='linux-gnueabi') self.add_config(arch='armeb', os_name='linux-gnueabi') self.add_config(arch='armeb', os_name='linux-gnueabi', variant='be8', gcc_cfg=['--with-arch=armv7-a']) self.add_config(arch='arm', os_name='linux-gnueabihf') self.add_config(arch='armeb', os_name='linux-gnueabihf') self.add_config(arch='armeb', os_name='linux-gnueabihf', variant='be8', gcc_cfg=['--with-arch=armv7-a']) self.add_config(arch='hppa', os_name='linux-gnu') self.add_config(arch='ia64', os_name='linux-gnu', first_gcc_cfg=['--with-system-libunwind']) self.add_config(arch='m68k', os_name='linux-gnu', gcc_cfg=['--disable-multilib']) self.add_config(arch='m68k', os_name='linux-gnu', variant='coldfire', gcc_cfg=['--with-arch=cf', '--disable-multilib']) self.add_config(arch='microblaze', os_name='linux-gnu', gcc_cfg=['--disable-multilib']) self.add_config(arch='microblazeel', os_name='linux-gnu', gcc_cfg=['--disable-multilib']) self.add_config(arch='mips64', os_name='linux-gnu', gcc_cfg=['--with-mips-plt'], glibcs=[{'variant': 'n32'}, {'arch': 'mips', 'ccopts': '-mabi=32'}, {'variant': 'n64', 'ccopts': '-mabi=64'}]) self.add_config(arch='mips64', os_name='linux-gnu', variant='soft', gcc_cfg=['--with-mips-plt', '--with-float=soft'], glibcs=[{'variant': 'n32-soft', 'cfg': ['--without-fp']}, {'variant': 'soft', 'arch': 'mips', 'ccopts': '-mabi=32', 'cfg': ['--without-fp']}, {'variant': 'n64-soft', 'ccopts': '-mabi=64', 'cfg': ['--without-fp']}]) self.add_config(arch='mips64', os_name='linux-gnu', variant='nan2008', gcc_cfg=['--with-mips-plt', '--with-nan=2008', '--with-arch-64=mips64r2', '--with-arch-32=mips32r2'], glibcs=[{'variant': 'n32-nan2008'}, {'variant': 'nan2008', 'arch': 'mips', 'ccopts': '-mabi=32'}, {'variant': 'n64-nan2008', 'ccopts': '-mabi=64'}]) self.add_config(arch='mips64', os_name='linux-gnu', variant='nan2008-soft', gcc_cfg=['--with-mips-plt', '--with-nan=2008', '--with-arch-64=mips64r2', '--with-arch-32=mips32r2', '--with-float=soft'], glibcs=[{'variant': 'n32-nan2008-soft', 'cfg': ['--without-fp']}, {'variant': 'nan2008-soft', 'arch': 'mips', 'ccopts': '-mabi=32', 'cfg': ['--without-fp']}, {'variant': 'n64-nan2008-soft', 'ccopts': '-mabi=64', 'cfg': ['--without-fp']}]) self.add_config(arch='mips64el', os_name='linux-gnu', gcc_cfg=['--with-mips-plt'], glibcs=[{'variant': 'n32'}, {'arch': 'mipsel', 'ccopts': '-mabi=32'}, {'variant': 'n64', 'ccopts': '-mabi=64'}]) self.add_config(arch='mips64el', os_name='linux-gnu', variant='soft', gcc_cfg=['--with-mips-plt', '--with-float=soft'], glibcs=[{'variant': 'n32-soft', 'cfg': ['--without-fp']}, {'variant': 'soft', 'arch': 'mipsel', 'ccopts': '-mabi=32', 'cfg': ['--without-fp']}, {'variant': 'n64-soft', 'ccopts': '-mabi=64', 'cfg': ['--without-fp']}]) self.add_config(arch='mips64el', os_name='linux-gnu', variant='nan2008', gcc_cfg=['--with-mips-plt', '--with-nan=2008', '--with-arch-64=mips64r2', '--with-arch-32=mips32r2'], glibcs=[{'variant': 'n32-nan2008'}, {'variant': 'nan2008', 'arch': 'mipsel', 'ccopts': '-mabi=32'}, {'variant': 'n64-nan2008', 'ccopts': '-mabi=64'}]) self.add_config(arch='mips64el', os_name='linux-gnu', variant='nan2008-soft', gcc_cfg=['--with-mips-plt', '--with-nan=2008', '--with-arch-64=mips64r2', '--with-arch-32=mips32r2', 
'--with-float=soft'], glibcs=[{'variant': 'n32-nan2008-soft', 'cfg': ['--without-fp']}, {'variant': 'nan2008-soft', 'arch': 'mipsel', 'ccopts': '-mabi=32', 'cfg': ['--without-fp']}, {'variant': 'n64-nan2008-soft', 'ccopts': '-mabi=64', 'cfg': ['--without-fp']}]) self.add_config(arch='nios2', os_name='linux-gnu') self.add_config(arch='powerpc', os_name='linux-gnu', gcc_cfg=['--disable-multilib', '--enable-secureplt'], extra_glibcs=[{'variant': 'power4', 'ccopts': '-mcpu=power4', 'cfg': ['--with-cpu=power4']}]) self.add_config(arch='powerpc', os_name='linux-gnu', variant='soft', gcc_cfg=['--disable-multilib', '--with-float=soft', '--enable-secureplt'], glibcs=[{'variant': 'soft', 'cfg': ['--without-fp']}]) self.add_config(arch='powerpc64', os_name='linux-gnu', gcc_cfg=['--disable-multilib', '--enable-secureplt']) self.add_config(arch='powerpc64le', os_name='linux-gnu', gcc_cfg=['--disable-multilib', '--enable-secureplt']) self.add_config(arch='powerpc', os_name='linux-gnuspe', gcc_cfg=['--disable-multilib', '--enable-secureplt', '--enable-e500-double'], glibcs=[{'cfg': ['--without-fp']}]) self.add_config(arch='powerpc', os_name='linux-gnuspe', variant='e500v1', gcc_cfg=['--disable-multilib', '--enable-secureplt'], glibcs=[{'variant': 'e500v1', 'cfg': ['--without-fp']}]) self.add_config(arch='s390x', os_name='linux-gnu', glibcs=[{}, {'arch': 's390', 'ccopts': '-m31'}]) self.add_config(arch='sh3', os_name='linux-gnu', glibcs=[{'ccopts': no_isolate}]) self.add_config(arch='sh3eb', os_name='linux-gnu', glibcs=[{'ccopts': no_isolate}]) self.add_config(arch='sh4', os_name='linux-gnu', glibcs=[{'ccopts': no_isolate}]) self.add_config(arch='sh4eb', os_name='linux-gnu', glibcs=[{'ccopts': no_isolate}]) self.add_config(arch='sh4', os_name='linux-gnu', variant='soft', gcc_cfg=['--without-fp'], glibcs=[{'variant': 'soft', 'cfg': ['--without-fp'], 'ccopts': no_isolate}]) self.add_config(arch='sh4eb', os_name='linux-gnu', variant='soft', gcc_cfg=['--without-fp'], glibcs=[{'variant': 'soft', 'cfg': ['--without-fp'], 'ccopts': no_isolate}]) self.add_config(arch='sparc64', os_name='linux-gnu', glibcs=[{}, {'arch': 'sparcv9', 'ccopts': '-m32 -mlong-double-128'}]) self.add_config(arch='tilegx', os_name='linux-gnu', glibcs=[{}, {'variant': '32', 'ccopts': '-m32'}]) self.add_config(arch='tilegxbe', os_name='linux-gnu', glibcs=[{}, {'variant': '32', 'ccopts': '-m32'}]) self.add_config(arch='tilepro', os_name='linux-gnu') self.add_config(arch='x86_64', os_name='linux-gnu', gcc_cfg=['--with-multilib-list=m64,m32,mx32'], glibcs=[{}, {'variant': 'x32', 'ccopts': '-mx32'}, {'arch': 'i686', 'ccopts': '-m32 -march=i686'}], extra_glibcs=[{'variant': 'disable-multi-arch', 'cfg': ['--disable-multi-arch']}, {'variant': 'disable-multi-arch', 'arch': 'i686', 'ccopts': '-m32 -march=i686', 'cfg': ['--disable-multi-arch']}, {'arch': 'i486', 'ccopts': '-m32 -march=i486'}, {'arch': 'i586', 'ccopts': '-m32 -march=i586'}]) def add_config(self, **args): """Add an individual build configuration.""" cfg = Config(self, **args) if cfg.name in self.configs: print('error: duplicate config %s' % cfg.name) exit(1) self.configs[cfg.name] = cfg for c in cfg.all_glibcs: if c.name in self.glibc_configs: print('error: duplicate glibc config %s' % c.name) exit(1) self.glibc_configs[c.name] = c def component_srcdir(self, component): """Return the source directory for a given component, e.g. 
gcc.""" return os.path.join(self.srcdir, component) def component_builddir(self, action, config, component, subconfig=None): """Return the directory to use for a build.""" if config is None: # Host libraries. assert subconfig is None return os.path.join(self.builddir, action, component) if subconfig is None: return os.path.join(self.builddir, action, config, component) else: # glibc build as part of compiler build. return os.path.join(self.builddir, action, config, component, subconfig) def compiler_installdir(self, config): """Return the directory in which to install a compiler.""" return os.path.join(self.installdir, 'compilers', config) def compiler_bindir(self, config): """Return the directory in which to find compiler binaries.""" return os.path.join(self.compiler_installdir(config), 'bin') def compiler_sysroot(self, config): """Return the sysroot directory for a compiler.""" return os.path.join(self.compiler_installdir(config), 'sysroot') def glibc_installdir(self, config): """Return the directory in which to install glibc.""" return os.path.join(self.installdir, 'glibcs', config) def run_builds(self, action, configs): """Run the requested builds.""" if action == 'checkout': self.checkout(configs) return if action == 'bot-cycle': if configs: print('error: configurations specified for bot-cycle') exit(1) self.bot_cycle() return if action == 'bot': if configs: print('error: configurations specified for bot') exit(1) self.bot() return if action == 'host-libraries' and configs: print('error: configurations specified for host-libraries') exit(1) self.clear_last_build_state(action) build_time = datetime.datetime.utcnow() if action == 'host-libraries': build_components = ('gmp', 'mpfr', 'mpc') old_components = () old_versions = {} self.build_host_libraries() elif action == 'compilers': build_components = ('binutils', 'gcc', 'glibc', 'linux') old_components = ('gmp', 'mpfr', 'mpc') old_versions = self.build_state['host-libraries']['build-versions'] self.build_compilers(configs) else: build_components = ('glibc',) old_components = ('gmp', 'mpfr', 'mpc', 'binutils', 'gcc', 'linux') old_versions = self.build_state['compilers']['build-versions'] self.build_glibcs(configs) self.write_files() self.do_build() if configs: # Partial build, do not update stored state. 
return build_versions = {} for k in build_components: if k in self.versions: build_versions[k] = {'version': self.versions[k]['version'], 'revision': self.versions[k]['revision']} for k in old_components: if k in old_versions: build_versions[k] = {'version': old_versions[k]['version'], 'revision': old_versions[k]['revision']} self.update_build_state(action, build_time, build_versions) @staticmethod def remove_dirs(*args): """Remove directories and their contents if they exist.""" for dir in args: shutil.rmtree(dir, ignore_errors=True) @staticmethod def remove_recreate_dirs(*args): """Remove directories if they exist, and create them as empty.""" Context.remove_dirs(*args) for dir in args: os.makedirs(dir, exist_ok=True) def add_makefile_cmdlist(self, target, cmdlist, logsdir): """Add makefile text for a list of commands.""" commands = cmdlist.makefile_commands(self.wrapper, logsdir) self.makefile_pieces.append('all: %s\n.PHONY: %s\n%s:\n%s\n' % (target, target, target, commands)) self.status_log_list.extend(cmdlist.status_logs(logsdir)) def write_files(self): """Write out the Makefile and wrapper script.""" mftext = ''.join(self.makefile_pieces) with open(self.makefile, 'w') as f: f.write(mftext) wrapper_text = ( '#!/bin/sh\n' 'prev_base=$1\n' 'this_base=$2\n' 'desc=$3\n' 'dir=$4\n' 'path=$5\n' 'shift 5\n' 'prev_status=$prev_base-status.txt\n' 'this_status=$this_base-status.txt\n' 'this_log=$this_base-log.txt\n' 'date > "$this_log"\n' 'echo >> "$this_log"\n' 'echo "Description: $desc" >> "$this_log"\n' 'printf "%s" "Command:" >> "$this_log"\n' 'for word in "$@"; do\n' ' if expr "$word" : "[]+,./0-9@A-Z_a-z-]\\\\{1,\\\\}\\$" > /dev/null; then\n' ' printf " %s" "$word"\n' ' else\n' ' printf " \'"\n' ' printf "%s" "$word" | sed -e "s/\'/\'\\\\\\\\\'\'/"\n' ' printf "\'"\n' ' fi\n' 'done >> "$this_log"\n' 'echo >> "$this_log"\n' 'echo "Directory: $dir" >> "$this_log"\n' 'echo "Path addition: $path" >> "$this_log"\n' 'echo >> "$this_log"\n' 'record_status ()\n' '{\n' ' echo >> "$this_log"\n' ' echo "$1: $desc" > "$this_status"\n' ' echo "$1: $desc" >> "$this_log"\n' ' echo >> "$this_log"\n' ' date >> "$this_log"\n' ' echo "$1: $desc"\n' ' exit 0\n' '}\n' 'check_error ()\n' '{\n' ' if [ "$1" != "0" ]; then\n' ' record_status FAIL\n' ' fi\n' '}\n' 'if [ "$prev_base" ] && ! grep -q "^PASS" "$prev_status"; then\n' ' record_status UNRESOLVED\n' 'fi\n' 'if [ "$dir" ]; then\n' ' cd "$dir"\n' ' check_error "$?"\n' 'fi\n' 'if [ "$path" ]; then\n' ' PATH=$path:$PATH\n' 'fi\n' '"$@" < /dev/null >> "$this_log" 2>&1\n' 'check_error "$?"\n' 'record_status PASS\n') with open(self.wrapper, 'w') as f: f.write(wrapper_text) # Mode 0o755. mode_exec = (stat.S_IRWXU|stat.S_IRGRP|stat.S_IXGRP| stat.S_IROTH|stat.S_IXOTH) os.chmod(self.wrapper, mode_exec) save_logs_text = ( '#!/bin/sh\n' 'if ! 
[ -f tests.sum ]; then\n' ' echo "No test summary available."\n' ' exit 0\n' 'fi\n' 'save_file ()\n' '{\n' ' echo "Contents of $1:"\n' ' echo\n' ' cat "$1"\n' ' echo\n' ' echo "End of contents of $1."\n' ' echo\n' '}\n' 'save_file tests.sum\n' 'non_pass_tests=$(grep -v "^PASS: " tests.sum | sed -e "s/^PASS: //")\n' 'for t in $non_pass_tests; do\n' ' if [ -f "$t.out" ]; then\n' ' save_file "$t.out"\n' ' fi\n' 'done\n') with open(self.save_logs, 'w') as f: f.write(save_logs_text) os.chmod(self.save_logs, mode_exec) def do_build(self): """Do the actual build.""" cmd = ['make', '-j%d' % self.parallelism] subprocess.run(cmd, cwd=self.builddir, check=True) def build_host_libraries(self): """Build the host libraries.""" installdir = self.host_libraries_installdir builddir = os.path.join(self.builddir, 'host-libraries') logsdir = os.path.join(self.logsdir, 'host-libraries') self.remove_recreate_dirs(installdir, builddir, logsdir) cmdlist = CommandList('host-libraries', self.keep) self.build_host_library(cmdlist, 'gmp') self.build_host_library(cmdlist, 'mpfr', ['--with-gmp=%s' % installdir]) self.build_host_library(cmdlist, 'mpc', ['--with-gmp=%s' % installdir, '--with-mpfr=%s' % installdir]) cmdlist.add_command('done', ['touch', os.path.join(installdir, 'ok')]) self.add_makefile_cmdlist('host-libraries', cmdlist, logsdir) def build_host_library(self, cmdlist, lib, extra_opts=None): """Build one host library.""" srcdir = self.component_srcdir(lib) builddir = self.component_builddir('host-libraries', None, lib) installdir = self.host_libraries_installdir cmdlist.push_subdesc(lib) cmdlist.create_use_dir(builddir) cfg_cmd = [os.path.join(srcdir, 'configure'), '--prefix=%s' % installdir, '--disable-shared'] if extra_opts: cfg_cmd.extend (extra_opts) cmdlist.add_command('configure', cfg_cmd) cmdlist.add_command('build', ['make']) cmdlist.add_command('check', ['make', 'check']) cmdlist.add_command('install', ['make', 'install']) cmdlist.cleanup_dir() cmdlist.pop_subdesc() def build_compilers(self, configs): """Build the compilers.""" if not configs: self.remove_dirs(os.path.join(self.builddir, 'compilers')) self.remove_dirs(os.path.join(self.installdir, 'compilers')) self.remove_dirs(os.path.join(self.logsdir, 'compilers')) configs = sorted(self.configs.keys()) for c in configs: self.configs[c].build() def build_glibcs(self, configs): """Build the glibcs.""" if not configs: self.remove_dirs(os.path.join(self.builddir, 'glibcs')) self.remove_dirs(os.path.join(self.installdir, 'glibcs')) self.remove_dirs(os.path.join(self.logsdir, 'glibcs')) configs = sorted(self.glibc_configs.keys()) for c in configs: self.glibc_configs[c].build() def load_versions_json(self): """Load information about source directory versions.""" if not os.access(self.versions_json, os.F_OK): self.versions = {} return with open(self.versions_json, 'r') as f: self.versions = json.load(f) def store_json(self, data, filename): """Store information in a JSON file.""" filename_tmp = filename + '.tmp' with open(filename_tmp, 'w') as f: json.dump(data, f, indent=2, sort_keys=True) os.rename(filename_tmp, filename) def store_versions_json(self): """Store information about source directory versions.""" self.store_json(self.versions, self.versions_json) def set_component_version(self, component, version, explicit, revision): """Set the version information for a component.""" self.versions[component] = {'version': version, 'explicit': explicit, 'revision': revision} self.store_versions_json() def checkout(self, versions): """Check out the desired 
component versions.""" default_versions = {'binutils': 'vcs-2.28', 'gcc': 'vcs-6', 'glibc': 'vcs-mainline', 'gmp': '6.1.1', 'linux': '4.9', 'mpc': '1.0.3', 'mpfr': '3.1.5'} use_versions = {} explicit_versions = {} for v in versions: found_v = False for k in default_versions.keys(): kx = k + '-' if v.startswith(kx): vx = v[len(kx):] if k in use_versions: print('error: multiple versions for %s' % k) exit(1) use_versions[k] = vx explicit_versions[k] = True found_v = True break if not found_v: print('error: unknown component in %s' % v) exit(1) for k in default_versions.keys(): if k not in use_versions: if k in self.versions and self.versions[k]['explicit']: use_versions[k] = self.versions[k]['version'] explicit_versions[k] = True else: use_versions[k] = default_versions[k] explicit_versions[k] = False os.makedirs(self.srcdir, exist_ok=True) for k in sorted(default_versions.keys()): update = os.access(self.component_srcdir(k), os.F_OK) v = use_versions[k] if (update and k in self.versions and v != self.versions[k]['version']): if not self.replace_sources: print('error: version of %s has changed from %s to %s, ' 'use --replace-sources to check out again' % (k, self.versions[k]['version'], v)) exit(1) shutil.rmtree(self.component_srcdir(k)) update = False if v.startswith('vcs-'): revision = self.checkout_vcs(k, v[4:], update) else: self.checkout_tar(k, v, update) revision = v self.set_component_version(k, v, explicit_versions[k], revision) if self.get_script_text() != self.script_text: # Rerun the checkout process in case the updated script # uses different default versions or new components. self.exec_self() def checkout_vcs(self, component, version, update): """Check out the given version of the given component from version control. Return a revision identifier.""" if component == 'binutils': git_url = 'git://sourceware.org/git/binutils-gdb.git' if version == 'mainline': git_branch = 'master' else: trans = str.maketrans({'.': '_'}) git_branch = 'binutils-%s-branch' % version.translate(trans) return self.git_checkout(component, git_url, git_branch, update) elif component == 'gcc': if version == 'mainline': branch = 'trunk' else: trans = str.maketrans({'.': '_'}) branch = 'branches/gcc-%s-branch' % version.translate(trans) svn_url = 'svn://gcc.gnu.org/svn/gcc/%s' % branch return self.gcc_checkout(svn_url, update) elif component == 'glibc': git_url = 'git://sourceware.org/git/glibc.git' if version == 'mainline': git_branch = 'master' else: git_branch = 'release/%s/master' % version r = self.git_checkout(component, git_url, git_branch, update) self.fix_glibc_timestamps() return r else: print('error: component %s coming from VCS' % component) exit(1) def git_checkout(self, component, git_url, git_branch, update): """Check out a component from git. Return a commit identifier.""" if update: subprocess.run(['git', 'remote', 'prune', 'origin'], cwd=self.component_srcdir(component), check=True) subprocess.run(['git', 'pull', '-q'], cwd=self.component_srcdir(component), check=True) else: subprocess.run(['git', 'clone', '-q', '-b', git_branch, git_url, self.component_srcdir(component)], check=True) r = subprocess.run(['git', 'rev-parse', 'HEAD'], cwd=self.component_srcdir(component), stdout=subprocess.PIPE, check=True, universal_newlines=True).stdout return r.rstrip() def fix_glibc_timestamps(self): """Fix timestamps in a glibc checkout.""" # Ensure that builds do not try to regenerate generated files # in the source tree. 
srcdir = self.component_srcdir('glibc') for dirpath, dirnames, filenames in os.walk(srcdir): for f in filenames: if (f == 'configure' or f == 'preconfigure' or f.endswith('-kw.h')): to_touch = os.path.join(dirpath, f) subprocess.run(['touch', to_touch], check=True) def gcc_checkout(self, svn_url, update): """Check out GCC from SVN. Return the revision number.""" if not update: subprocess.run(['svn', 'co', '-q', svn_url, self.component_srcdir('gcc')], check=True) subprocess.run(['contrib/gcc_update', '--silent'], cwd=self.component_srcdir('gcc'), check=True) r = subprocess.run(['svnversion', self.component_srcdir('gcc')], stdout=subprocess.PIPE, check=True, universal_newlines=True).stdout return r.rstrip() def checkout_tar(self, component, version, update): """Check out the given version of the given component from a tarball.""" if update: return url_map = {'binutils': 'https://ftp.gnu.org/gnu/binutils/binutils-%(version)s.tar.bz2', 'gcc': 'https://ftp.gnu.org/gnu/gcc/gcc-%(version)s/gcc-%(version)s.tar.bz2', 'gmp': 'https://ftp.gnu.org/gnu/gmp/gmp-%(version)s.tar.xz', 'linux': 'https://www.kernel.org/pub/linux/kernel/v4.x/linux-%(version)s.tar.xz', 'mpc': 'https://ftp.gnu.org/gnu/mpc/mpc-%(version)s.tar.gz', 'mpfr': 'https://ftp.gnu.org/gnu/mpfr/mpfr-%(version)s.tar.xz'} if component not in url_map: print('error: component %s coming from tarball' % component) exit(1) url = url_map[component] % {'version': version} filename = os.path.join(self.srcdir, url.split('/')[-1]) response = urllib.request.urlopen(url) data = response.read() with open(filename, 'wb') as f: f.write(data) subprocess.run(['tar', '-C', self.srcdir, '-x', '-f', filename], check=True) os.rename(os.path.join(self.srcdir, '%s-%s' % (component, version)), self.component_srcdir(component)) os.remove(filename) def load_build_state_json(self): """Load information about the state of previous builds.""" if os.access(self.build_state_json, os.F_OK): with open(self.build_state_json, 'r') as f: self.build_state = json.load(f) else: self.build_state = {} for k in ('host-libraries', 'compilers', 'glibcs'): if k not in self.build_state: self.build_state[k] = {} if 'build-time' not in self.build_state[k]: self.build_state[k]['build-time'] = '' if 'build-versions' not in self.build_state[k]: self.build_state[k]['build-versions'] = {} if 'build-results' not in self.build_state[k]: self.build_state[k]['build-results'] = {} if 'result-changes' not in self.build_state[k]: self.build_state[k]['result-changes'] = {} if 'ever-passed' not in self.build_state[k]: self.build_state[k]['ever-passed'] = [] def store_build_state_json(self): """Store information about the state of previous builds.""" self.store_json(self.build_state, self.build_state_json) def clear_last_build_state(self, action): """Clear information about the state of part of the build.""" # We clear the last build time and versions when starting a # new build. The results of the last build are kept around, # as comparison is still meaningful if this build is aborted # and a new one started. 
self.build_state[action]['build-time'] = '' self.build_state[action]['build-versions'] = {} self.store_build_state_json() def update_build_state(self, action, build_time, build_versions): """Update the build state after a build.""" build_time = build_time.replace(microsecond=0) self.build_state[action]['build-time'] = str(build_time) self.build_state[action]['build-versions'] = build_versions build_results = {} for log in self.status_log_list: with open(log, 'r') as f: log_text = f.read() log_text = log_text.rstrip() m = re.fullmatch('([A-Z]+): (.*)', log_text) result = m.group(1) test_name = m.group(2) assert test_name not in build_results build_results[test_name] = result old_build_results = self.build_state[action]['build-results'] self.build_state[action]['build-results'] = build_results result_changes = {} all_tests = set(old_build_results.keys()) | set(build_results.keys()) for t in all_tests: if t in old_build_results: old_res = old_build_results[t] else: old_res = '(New test)' if t in build_results: new_res = build_results[t] else: new_res = '(Test removed)' if old_res != new_res: result_changes[t] = '%s -> %s' % (old_res, new_res) self.build_state[action]['result-changes'] = result_changes old_ever_passed = {t for t in self.build_state[action]['ever-passed'] if t in build_results} new_passes = {t for t in build_results if build_results[t] == 'PASS'} self.build_state[action]['ever-passed'] = sorted(old_ever_passed | new_passes) self.store_build_state_json() def load_bot_config_json(self): """Load bot configuration.""" with open(self.bot_config_json, 'r') as f: self.bot_config = json.load(f) def part_build_old(self, action, delay): """Return whether the last build for a given action was at least a given number of seconds ago, or does not have a time recorded.""" old_time_str = self.build_state[action]['build-time'] if not old_time_str: return True old_time = datetime.datetime.strptime(old_time_str, '%Y-%m-%d %H:%M:%S') new_time = datetime.datetime.utcnow() delta = new_time - old_time return delta.total_seconds() >= delay def bot_cycle(self): """Run a single round of checkout and builds.""" print('Bot cycle starting %s.' % str(datetime.datetime.utcnow())) self.load_bot_config_json() actions = ('host-libraries', 'compilers', 'glibcs') self.bot_run_self(['--replace-sources'], 'checkout') self.load_versions_json() if self.get_script_text() != self.script_text: print('Script changed, re-execing.') # On script change, all parts of the build should be rerun. for a in actions: self.clear_last_build_state(a) self.exec_self() check_components = {'host-libraries': ('gmp', 'mpfr', 'mpc'), 'compilers': ('binutils', 'gcc', 'glibc', 'linux'), 'glibcs': ('glibc',)} must_build = {} for a in actions: build_vers = self.build_state[a]['build-versions'] must_build[a] = False if not self.build_state[a]['build-time']: must_build[a] = True old_vers = {} new_vers = {} for c in check_components[a]: if c in build_vers: old_vers[c] = build_vers[c] new_vers[c] = {'version': self.versions[c]['version'], 'revision': self.versions[c]['revision']} if new_vers == old_vers: print('Versions for %s unchanged.' % a) else: print('Versions changed or rebuild forced for %s.' 
% a) if a == 'compilers' and not self.part_build_old( a, self.bot_config['compilers-rebuild-delay']): print('Not requiring rebuild of compilers this soon.') else: must_build[a] = True if must_build['host-libraries']: must_build['compilers'] = True if must_build['compilers']: must_build['glibcs'] = True for a in actions: if must_build[a]: print('Must rebuild %s.' % a) self.clear_last_build_state(a) else: print('No need to rebuild %s.' % a) if os.access(self.logsdir, os.F_OK): shutil.rmtree(self.logsdir_old, ignore_errors=True) shutil.copytree(self.logsdir, self.logsdir_old) for a in actions: if must_build[a]: build_time = datetime.datetime.utcnow() print('Rebuilding %s at %s.' % (a, str(build_time))) self.bot_run_self([], a) self.load_build_state_json() self.bot_build_mail(a, build_time) print('Bot cycle done at %s.' % str(datetime.datetime.utcnow())) def bot_build_mail(self, action, build_time): """Send email with the results of a build.""" if not ('email-from' in self.bot_config and 'email-server' in self.bot_config and 'email-subject' in self.bot_config and 'email-to' in self.bot_config): if not self.email_warning: print("Email not configured, not sending.") self.email_warning = True return build_time = build_time.replace(microsecond=0) subject = (self.bot_config['email-subject'] % {'action': action, 'build-time': str(build_time)}) results = self.build_state[action]['build-results'] changes = self.build_state[action]['result-changes'] ever_passed = set(self.build_state[action]['ever-passed']) versions = self.build_state[action]['build-versions'] new_regressions = {k for k in changes if changes[k] == 'PASS -> FAIL'} all_regressions = {k for k in ever_passed if results[k] == 'FAIL'} all_fails = {k for k in results if results[k] == 'FAIL'} if new_regressions: new_reg_list = sorted(['FAIL: %s' % k for k in new_regressions]) new_reg_text = ('New regressions:\n\n%s\n\n' % '\n'.join(new_reg_list)) else: new_reg_text = '' if all_regressions: all_reg_list = sorted(['FAIL: %s' % k for k in all_regressions]) all_reg_text = ('All regressions:\n\n%s\n\n' % '\n'.join(all_reg_list)) else: all_reg_text = '' if all_fails: all_fail_list = sorted(['FAIL: %s' % k for k in all_fails]) all_fail_text = ('All failures:\n\n%s\n\n' % '\n'.join(all_fail_list)) else: all_fail_text = '' if changes: changes_list = sorted(changes.keys()) changes_list = ['%s: %s' % (changes[k], k) for k in changes_list] changes_text = ('All changed results:\n\n%s\n\n' % '\n'.join(changes_list)) else: changes_text = '' results_text = (new_reg_text + all_reg_text + all_fail_text + changes_text) if not results_text: results_text = 'Clean build with unchanged results.\n\n' versions_list = sorted(versions.keys()) versions_list = ['%s: %s (%s)' % (k, versions[k]['version'], versions[k]['revision']) for k in versions_list] versions_text = ('Component versions for this build:\n\n%s\n' % '\n'.join(versions_list)) body_text = results_text + versions_text msg = email.mime.text.MIMEText(body_text) msg['Subject'] = subject msg['From'] = self.bot_config['email-from'] msg['To'] = self.bot_config['email-to'] msg['Message-ID'] = email.utils.make_msgid() msg['Date'] = email.utils.format_datetime(datetime.datetime.utcnow()) with smtplib.SMTP(self.bot_config['email-server']) as s: s.send_message(msg) def bot_run_self(self, opts, action, check=True): """Run a copy of this script with given options.""" cmd = [sys.executable, sys.argv[0], '--keep=none', '-j%d' % self.parallelism] cmd.extend(opts) cmd.extend([self.topdir, action]) sys.stdout.flush() 
subprocess.run(cmd, check=check) def bot(self): """Run repeated rounds of checkout and builds.""" while True: self.load_bot_config_json() if not self.bot_config['run']: print('Bot exiting by request.') exit(0) self.bot_run_self([], 'bot-cycle', check=False) self.load_bot_config_json() if not self.bot_config['run']: print('Bot exiting by request.') exit(0) time.sleep(self.bot_config['delay']) if self.get_script_text() != self.script_text: print('Script changed, bot re-execing.') self.exec_self() class Config(object): """A configuration for building a compiler and associated libraries.""" def __init__(self, ctx, arch, os_name, variant=None, gcc_cfg=None, first_gcc_cfg=None, glibcs=None, extra_glibcs=None): """Initialize a Config object.""" self.ctx = ctx self.arch = arch self.os = os_name self.variant = variant if variant is None: self.name = '%s-%s' % (arch, os_name) else: self.name = '%s-%s-%s' % (arch, os_name, variant) self.triplet = '%s-glibc-%s' % (arch, os_name) if gcc_cfg is None: self.gcc_cfg = [] else: self.gcc_cfg = gcc_cfg if first_gcc_cfg is None: self.first_gcc_cfg = [] else: self.first_gcc_cfg = first_gcc_cfg if glibcs is None: glibcs = [{'variant': variant}] if extra_glibcs is None: extra_glibcs = [] glibcs = [Glibc(self, **g) for g in glibcs] extra_glibcs = [Glibc(self, **g) for g in extra_glibcs] self.all_glibcs = glibcs + extra_glibcs self.compiler_glibcs = glibcs self.installdir = ctx.compiler_installdir(self.name) self.bindir = ctx.compiler_bindir(self.name) self.sysroot = ctx.compiler_sysroot(self.name) self.builddir = os.path.join(ctx.builddir, 'compilers', self.name) self.logsdir = os.path.join(ctx.logsdir, 'compilers', self.name) def component_builddir(self, component): """Return the directory to use for a (non-glibc) build.""" return self.ctx.component_builddir('compilers', self.name, component) def build(self): """Generate commands to build this compiler.""" self.ctx.remove_recreate_dirs(self.installdir, self.builddir, self.logsdir) cmdlist = CommandList('compilers-%s' % self.name, self.ctx.keep) cmdlist.add_command('check-host-libraries', ['test', '-f', os.path.join(self.ctx.host_libraries_installdir, 'ok')]) cmdlist.use_path(self.bindir) self.build_cross_tool(cmdlist, 'binutils', 'binutils', ['--disable-gdb', '--disable-libdecnumber', '--disable-readline', '--disable-sim']) if self.os.startswith('linux'): self.install_linux_headers(cmdlist) self.build_gcc(cmdlist, True) for g in self.compiler_glibcs: cmdlist.push_subdesc('glibc') cmdlist.push_subdesc(g.name) g.build_glibc(cmdlist, True) cmdlist.pop_subdesc() cmdlist.pop_subdesc() self.build_gcc(cmdlist, False) cmdlist.add_command('done', ['touch', os.path.join(self.installdir, 'ok')]) self.ctx.add_makefile_cmdlist('compilers-%s' % self.name, cmdlist, self.logsdir) def build_cross_tool(self, cmdlist, tool_src, tool_build, extra_opts=None): """Build one cross tool.""" srcdir = self.ctx.component_srcdir(tool_src) builddir = self.component_builddir(tool_build) cmdlist.push_subdesc(tool_build) cmdlist.create_use_dir(builddir) cfg_cmd = [os.path.join(srcdir, 'configure'), '--prefix=%s' % self.installdir, '--build=%s' % self.ctx.build_triplet, '--host=%s' % self.ctx.build_triplet, '--target=%s' % self.triplet, '--with-sysroot=%s' % self.sysroot] if extra_opts: cfg_cmd.extend(extra_opts) cmdlist.add_command('configure', cfg_cmd) cmdlist.add_command('build', ['make']) # Parallel "make install" for GCC has race conditions that can # cause it to fail; see # <https://gcc.gnu.org/bugzilla/show_bug.cgi?id=42980>. 
Such # problems are not known for binutils, but doing the # installation in parallel within a particular toolchain build # (as opposed to installation of one toolchain from # build-many-glibcs.py running in parallel to the installation # of other toolchains being built) is not known to be # significantly beneficial, so it is simplest just to disable # parallel install for cross tools here. cmdlist.add_command('install', ['make', '-j1', 'install']) cmdlist.cleanup_dir() cmdlist.pop_subdesc() def install_linux_headers(self, cmdlist): """Install Linux kernel headers.""" arch_map = {'aarch64': 'arm64', 'alpha': 'alpha', 'arm': 'arm', 'hppa': 'parisc', 'i486': 'x86', 'i586': 'x86', 'i686': 'x86', 'i786': 'x86', 'ia64': 'ia64', 'm68k': 'm68k', 'microblaze': 'microblaze', 'mips': 'mips', 'nios2': 'nios2', 'powerpc': 'powerpc', 's390': 's390', 'sh': 'sh', 'sparc': 'sparc', 'tile': 'tile', 'x86_64': 'x86'} linux_arch = None for k in arch_map: if self.arch.startswith(k): linux_arch = arch_map[k] break assert linux_arch is not None srcdir = self.ctx.component_srcdir('linux') builddir = self.component_builddir('linux') headers_dir = os.path.join(self.sysroot, 'usr') cmdlist.push_subdesc('linux') cmdlist.create_use_dir(builddir) cmdlist.add_command('install-headers', ['make', '-C', srcdir, 'O=%s' % builddir, 'ARCH=%s' % linux_arch, 'INSTALL_HDR_PATH=%s' % headers_dir, 'headers_install']) cmdlist.cleanup_dir() cmdlist.pop_subdesc() def build_gcc(self, cmdlist, bootstrap): """Build GCC.""" # libsanitizer commonly breaks because of glibc header # changes, or on unusual targets. libssp is of little # relevance with glibc's own stack checking support. cfg_opts = list(self.gcc_cfg) cfg_opts += ['--disable-libsanitizer', '--disable-libssp'] host_libs = self.ctx.host_libraries_installdir cfg_opts += ['--with-gmp=%s' % host_libs, '--with-mpfr=%s' % host_libs, '--with-mpc=%s' % host_libs] if bootstrap: tool_build = 'gcc-first' # Building a static-only, C-only compiler that is # sufficient to build glibc. Various libraries and # features that may require libc headers must be disabled. # When configuring with a sysroot, --with-newlib is # required to define inhibit_libc (to stop some parts of # libgcc including libc headers); --without-headers is not # sufficient. 
cfg_opts += ['--enable-languages=c', '--disable-shared', '--disable-threads', '--disable-libatomic', '--disable-decimal-float', '--disable-libffi', '--disable-libgomp', '--disable-libitm', '--disable-libmpx', '--disable-libquadmath', '--without-headers', '--with-newlib', '--with-glibc-version=%s' % self.ctx.glibc_version ] cfg_opts += self.first_gcc_cfg else: tool_build = 'gcc' cfg_opts += ['--enable-languages=c,c++', '--enable-shared', '--enable-threads'] self.build_cross_tool(cmdlist, 'gcc', tool_build, cfg_opts) class Glibc(object): """A configuration for building glibc.""" def __init__(self, compiler, arch=None, os_name=None, variant=None, cfg=None, ccopts=None): """Initialize a Glibc object.""" self.ctx = compiler.ctx self.compiler = compiler if arch is None: self.arch = compiler.arch else: self.arch = arch if os_name is None: self.os = compiler.os else: self.os = os_name self.variant = variant if variant is None: self.name = '%s-%s' % (self.arch, self.os) else: self.name = '%s-%s-%s' % (self.arch, self.os, variant) self.triplet = '%s-glibc-%s' % (self.arch, self.os) if cfg is None: self.cfg = [] else: self.cfg = cfg self.ccopts = ccopts def tool_name(self, tool): """Return the name of a cross-compilation tool.""" ctool = '%s-%s' % (self.compiler.triplet, tool) if self.ccopts and (tool == 'gcc' or tool == 'g++'): ctool = '%s %s' % (ctool, self.ccopts) return ctool def build(self): """Generate commands to build this glibc.""" builddir = self.ctx.component_builddir('glibcs', self.name, 'glibc') installdir = self.ctx.glibc_installdir(self.name) logsdir = os.path.join(self.ctx.logsdir, 'glibcs', self.name) self.ctx.remove_recreate_dirs(installdir, builddir, logsdir) cmdlist = CommandList('glibcs-%s' % self.name, self.ctx.keep) cmdlist.add_command('check-compilers', ['test', '-f', os.path.join(self.compiler.installdir, 'ok')]) cmdlist.use_path(self.compiler.bindir) self.build_glibc(cmdlist, False) self.ctx.add_makefile_cmdlist('glibcs-%s' % self.name, cmdlist, logsdir) def build_glibc(self, cmdlist, for_compiler): """Generate commands to build this glibc, either as part of a compiler build or with the bootstrapped compiler (and in the latter case, run tests as well).""" srcdir = self.ctx.component_srcdir('glibc') if for_compiler: builddir = self.ctx.component_builddir('compilers', self.compiler.name, 'glibc', self.name) installdir = self.compiler.sysroot srcdir_copy = self.ctx.component_builddir('compilers', self.compiler.name, 'glibc-src', self.name) else: builddir = self.ctx.component_builddir('glibcs', self.name, 'glibc') installdir = self.ctx.glibc_installdir(self.name) srcdir_copy = self.ctx.component_builddir('glibcs', self.name, 'glibc-src') cmdlist.create_use_dir(builddir) # glibc builds write into the source directory, and even if # not intentionally there is a risk of bugs that involve # writing into the working directory. To avoid possible # concurrency issues, copy the source directory. 
cmdlist.create_copy_dir(srcdir, srcdir_copy) cfg_cmd = [os.path.join(srcdir_copy, 'configure'), '--prefix=/usr', '--enable-add-ons', '--build=%s' % self.ctx.build_triplet, '--host=%s' % self.triplet, 'CC=%s' % self.tool_name('gcc'), 'CXX=%s' % self.tool_name('g++'), 'AR=%s' % self.tool_name('ar'), 'AS=%s' % self.tool_name('as'), 'LD=%s' % self.tool_name('ld'), 'NM=%s' % self.tool_name('nm'), 'OBJCOPY=%s' % self.tool_name('objcopy'), 'OBJDUMP=%s' % self.tool_name('objdump'), 'RANLIB=%s' % self.tool_name('ranlib'), 'READELF=%s' % self.tool_name('readelf'), 'STRIP=%s' % self.tool_name('strip')] cfg_cmd += self.cfg cmdlist.add_command('configure', cfg_cmd) cmdlist.add_command('build', ['make']) cmdlist.add_command('install', ['make', 'install', 'install_root=%s' % installdir]) # GCC uses paths such as lib/../lib64, so make sure lib # directories always exist. cmdlist.add_command('mkdir-lib', ['mkdir', '-p', os.path.join(installdir, 'lib'), os.path.join(installdir, 'usr', 'lib')]) if not for_compiler: if self.ctx.strip: cmdlist.add_command('strip', ['sh', '-c', ('%s %s/lib*/*.so' % (self.tool_name('strip'), installdir))]) cmdlist.add_command('check', ['make', 'check']) cmdlist.add_command('save-logs', [self.ctx.save_logs], always_run=True) cmdlist.cleanup_dir('cleanup-src', srcdir_copy) cmdlist.cleanup_dir() class Command(object): """A command run in the build process.""" def __init__(self, desc, num, dir, path, command, always_run=False): """Initialize a Command object.""" self.dir = dir self.path = path self.desc = desc trans = str.maketrans({' ': '-'}) self.logbase = '%03d-%s' % (num, desc.translate(trans)) self.command = command self.always_run = always_run @staticmethod def shell_make_quote_string(s): """Given a string not containing a newline, quote it for use by the shell and make.""" assert '\n' not in s if re.fullmatch('[]+,./0-9@A-Z_a-z-]+', s): return s strans = str.maketrans({"'": "'\\''"}) s = "'%s'" % s.translate(strans) mtrans = str.maketrans({'$': '$$'}) return s.translate(mtrans) @staticmethod def shell_make_quote_list(l, translate_make): """Given a list of strings not containing newlines, quote them for use by the shell and make, returning a single string. 
If translate_make is true and the first string is 'make', change it to $(MAKE).""" l = [Command.shell_make_quote_string(s) for s in l] if translate_make and l[0] == 'make': l[0] = '$(MAKE)' return ' '.join(l) def shell_make_quote(self): """Return this command quoted for the shell and make.""" return self.shell_make_quote_list(self.command, True) class CommandList(object): """A list of commands run in the build process.""" def __init__(self, desc, keep): """Initialize a CommandList object.""" self.cmdlist = [] self.dir = None self.path = None self.desc = [desc] self.keep = keep def desc_txt(self, desc): """Return the description to use for a command.""" return '%s %s' % (' '.join(self.desc), desc) def use_dir(self, dir): """Set the default directory for subsequent commands.""" self.dir = dir def use_path(self, path): """Set a directory to be prepended to the PATH for subsequent commands.""" self.path = path def push_subdesc(self, subdesc): """Set the default subdescription for subsequent commands (e.g., the name of a component being built, within the series of commands building it).""" self.desc.append(subdesc) def pop_subdesc(self): """Pop a subdescription from the list of descriptions.""" self.desc.pop() def create_use_dir(self, dir): """Remove and recreate a directory and use it for subsequent commands.""" self.add_command_dir('rm', None, ['rm', '-rf', dir]) self.add_command_dir('mkdir', None, ['mkdir', '-p', dir]) self.use_dir(dir) def create_copy_dir(self, src, dest): """Remove a directory and recreate it as a copy from the given source.""" self.add_command_dir('copy-rm', None, ['rm', '-rf', dest]) parent = os.path.dirname(dest) self.add_command_dir('copy-mkdir', None, ['mkdir', '-p', parent]) self.add_command_dir('copy', None, ['cp', '-a', src, dest]) def add_command_dir(self, desc, dir, command, always_run=False): """Add a command to run in a given directory.""" cmd = Command(self.desc_txt(desc), len(self.cmdlist), dir, self.path, command, always_run) self.cmdlist.append(cmd) def add_command(self, desc, command, always_run=False): """Add a command to run in the default directory.""" cmd = Command(self.desc_txt(desc), len(self.cmdlist), self.dir, self.path, command, always_run) self.cmdlist.append(cmd) def cleanup_dir(self, desc='cleanup', dir=None): """Clean up a build directory. If no directory is specified, the default directory is cleaned up and ceases to be the default directory.""" if dir is None: dir = self.dir self.use_dir(None) if self.keep != 'all': self.add_command_dir(desc, None, ['rm', '-rf', dir], always_run=(self.keep == 'none')) def makefile_commands(self, wrapper, logsdir): """Return the sequence of commands in the form of text for a Makefile. The given wrapper script takes arguments: base of logs for previous command, or empty; base of logs for this command; description; directory; PATH addition; the command itself.""" # prev_base is the base of the name for logs of the previous # command that is not always-run (that is, a build command, # whose failure should stop subsequent build commands from # being run, as opposed to a cleanup command, which is run # even if previous commands failed). 
prev_base = '' cmds = [] for c in self.cmdlist: ctxt = c.shell_make_quote() if prev_base and not c.always_run: prev_log = os.path.join(logsdir, prev_base) else: prev_log = '' this_log = os.path.join(logsdir, c.logbase) if not c.always_run: prev_base = c.logbase if c.dir is None: dir = '' else: dir = c.dir if c.path is None: path = '' else: path = c.path prelims = [wrapper, prev_log, this_log, c.desc, dir, path] prelim_txt = Command.shell_make_quote_list(prelims, False) cmds.append('\t@%s %s' % (prelim_txt, ctxt)) return '\n'.join(cmds) def status_logs(self, logsdir): """Return the list of log files with command status.""" return [os.path.join(logsdir, '%s-status.txt' % c.logbase) for c in self.cmdlist] def get_parser(): """Return an argument parser for this module.""" parser = argparse.ArgumentParser(description=__doc__) parser.add_argument('-j', dest='parallelism', help='Run this number of jobs in parallel', type=int, default=os.cpu_count()) parser.add_argument('--keep', dest='keep', help='Whether to keep all build directories, ' 'none or only those from failed builds', default='none', choices=('none', 'all', 'failed')) parser.add_argument('--replace-sources', action='store_true', help='Remove and replace source directories ' 'with the wrong version of a component') parser.add_argument('--strip', action='store_true', help='Strip installed glibc libraries') parser.add_argument('topdir', help='Toplevel working directory') parser.add_argument('action', help='What to do', choices=('checkout', 'bot-cycle', 'bot', 'host-libraries', 'compilers', 'glibcs')) parser.add_argument('configs', help='Versions to check out or configurations to build', nargs='*') return parser def main(argv): """The main entry point.""" parser = get_parser() opts = parser.parse_args(argv) topdir = os.path.abspath(opts.topdir) ctx = Context(topdir, opts.parallelism, opts.keep, opts.replace_sources, opts.strip, opts.action) ctx.run_builds(opts.action, opts.configs) if __name__ == '__main__': main(sys.argv[1:])
gpl-2.0
-956,953,792,958,987,000
43.574401
100
0.496683
false
geokrety/geokrety-api
tests/unittests/api/moves_comments/test_move_comment_delete.py
2
3472
# -*- coding: utf-8 -*-

from flask import current_app
from parameterized import parameterized

from app.api.helpers.data_layers import (MOVE_COMMENT_TYPE_MISSING,
                                         MOVE_TYPE_DROPPED)
from tests.unittests.utils.base_test_case import BaseTestCase, request_context
from tests.unittests.utils.payload.move_comment import MoveCommentPayload


class TestMoveCommentDelete(BaseTestCase):
    """Test move comment delete"""

    @parameterized.expand([
        [None, 401],
        ['admin', 200],
        ['user_1', 200],  # author
        ['user_2', 403],
    ])
    @request_context
    def test_as_(self, username, expected):
        user = getattr(self, username) if username else None
        move_comment = self.blend_move_comment(author=self.user_1)
        MoveCommentPayload().delete(move_comment.id, user=user, code=expected)

    @parameterized.expand([
        [None, 401],
        ['admin', 200],
        ['user_1', 200],  # GeoKret owner
        ['user_2', 403],
    ])
    @request_context
    def test_geokret_owner_can_delete_comments(self, username, expected):
        user = getattr(self, username) if username else None
        geokret = self.blend_geokret(owner=self.user_1)
        move = self.blend_move(geokret=geokret)
        move_comment = self.blend_move_comment(move=move)
        MoveCommentPayload().delete(move_comment.id, user=user, code=expected)

    @parameterized.expand([
        [True, 200],
        [False, 403],
    ])
    @request_context
    def test_option_geokret_owner_can_delete_comments(self, enable, expected):
        current_app.config['ALLOW_GEOKRET_OWNER_TO_MODERATE_MOVE_COMMENTS'] = enable
        geokret = self.blend_geokret(owner=self.user_1)
        move = self.blend_move(geokret=geokret)
        move_comment = self.blend_move_comment(move=move)
        MoveCommentPayload()\
            .delete(move_comment.id, user=self.user_1, code=expected)
        current_app.config['ALLOW_GEOKRET_OWNER_TO_MODERATE_MOVE_COMMENTS'] = True

    @parameterized.expand([
        [None, 401],
        ['admin', 200],
        ['user_1', 200],  # move author
        ['user_2', 403],
    ])
    @request_context
    def test_move_author_can_delete_comments(self, username, expected):
        user = getattr(self, username) if username else None
        move = self.blend_move(author=self.user_1)
        move_comment = self.blend_move_comment(move=move)
        MoveCommentPayload().delete(move_comment.id, user=user, code=expected)

    @parameterized.expand([
        [True, 200],
        [False, 403],
    ])
    @request_context
    def test_option_move_author_can_moderate_move_comments(self, enable, expected):
        current_app.config['ALLOW_MOVE_AUTHOR_TO_MODERATE_MOVE_COMMENTS'] = enable
        move = self.blend_move(author=self.user_1)
        move_comment = self.blend_move_comment(move=move)
        MoveCommentPayload()\
            .delete(move_comment.id, user=self.user_1, code=expected)
        current_app.config['ALLOW_MOVE_AUTHOR_TO_MODERATE_MOVE_COMMENTS'] = True

    @request_context
    def test_geokret_missing_status_computed(self):
        move = self.blend_move(type=MOVE_TYPE_DROPPED)
        move_comment = self.blend_move_comment(move=move,
                                               type=MOVE_COMMENT_TYPE_MISSING,
                                               author=self.user_1)
        self.assertTrue(move.geokret.missing)
        MoveCommentPayload().delete(move_comment.id, user=self.user_1)
        self.assertFalse(move.geokret.missing)
gpl-3.0
5,816,123,954,174,530,000
38.908046
109
0.648329
false
mikaelpatel/Cosa
build/PlatformIO/platform/cosa/platform.py
3
1445
# Copyright 2014-present PlatformIO <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from platformio.managers.platform import PlatformBase


class CosaPlatform(PlatformBase):

    def configure_default_packages(self, variables, targets):
        if variables.get("board"):
            board_config = self.board_config(variables.get("board"))
            disable_tool = "tool-micronucleus"
            if "digispark" in board_config.get("build.core", ""):
                disable_tool = "tool-avrdude"
            if disable_tool in self.packages:
                del self.packages[disable_tool]

        return PlatformBase.configure_default_packages(
            self, variables, targets)

    def on_run_err(self, line):  # pylint: disable=R0201
        # fix STDERR "flash written" for avrdude
        if "avrdude" in line:
            self.on_run_out(line)
        else:
            PlatformBase.on_run_err(self, line)
lgpl-2.1
-3,250,313,395,905,753,000
38.054054
74
0.680277
false
missionpinball/mpf-examples
cookbook/TAF_mansion_awards/tests/test_taf_mansion_awards.py
1
8600
from mpf.tests.MpfMachineTestCase import MpfMachineTestCase


class TestTafMansionAwards(MpfMachineTestCase):

    def _start_single_player_game(self, secs_since_plunge):
        self.hit_and_release_switch('start')

        # game should be running
        self.assertIsNotNone(self.machine.game)
        self.assertEqual(1, self.machine.game.player.ball)

        # advance enough time for the balls to eject and stuff
        self.advance_time_and_run()

        # ball should be sitting in the plunger lane
        self.assertEqual(self.machine.ball_devices.drain.balls, 0)
        self.assertEqual(self.machine.ball_devices.trough.balls, 2)
        self.assertEqual(self.machine.ball_devices.plunger_lane.balls, 1)

        # playfield expects a ball
        self.assertEqual(1, self.machine.playfield.available_balls)

        # but its not there yet
        self.assertEqual(0, self.machine.playfield.balls)

        # after 20s it's still not there
        self.advance_time_and_run(20)
        self.assertEqual(0, self.machine.playfield.balls)

        # player mechanically ejects
        self.machine.switch_controller.process_switch('plunger_lane', 0, True)
        self.advance_time_and_run(secs_since_plunge)  # plunger timeout is 3s

    def test_single_player_game_start(self):
        self._start_single_player_game(4)

        # 4 secs since plunge means ball is on the pf
        self.assertEqual(1, self.machine.playfield.balls)
        self.assertEqual(self.machine.ball_devices.drain.balls, 0)
        self.assertEqual(self.machine.ball_devices.trough.balls, 2)
        self.assertEqual(self.machine.ball_devices.plunger_lane.balls, 0)

        self.assertModeRunning('mansion_awards')
        self.assertModeRunning('chair_lit')

    def test_mansion_awards(self):
        self._start_single_player_game(4)

        # make sure the selected achievement knows it's selected
        self.assertEqual(
            self.machine.achievement_groups.mansion_awards._selected_member.state,
            'selected')

        # The initial selected achievement should be one of these 2
        self.assertIn(
            self.machine.achievement_groups.mansion_awards._selected_member,
            [self.machine.achievements.mamushka,
             self.machine.achievements.hit_cousin_it])

        # it's light should be flashing
        self.assertLightFlashing(
            self.machine.achievement_groups.mansion_awards._selected_member.config['show_tokens']['lights'])

        # There should be 11 more enabled achievements
        enabled_achievements = [x for x in self.machine.achievements
                                if x.state == 'enabled']
        self.assertEqual(len(enabled_achievements), 11)

        # all those lights should be off
        for ach in enabled_achievements:
            self.assertLightOff(ach.config['show_tokens']['lights'])

        # Tour the mansion should be disabled
        self.assertEqual(self.machine.achievements.tour_mansion.state,
                         "disabled")

        self.assertMansionLit()

        # pop bumper hits should change the selected award
        # create a list of all the mansion achievements
        achievements = set(
            self.machine.achievement_groups.mansion_awards.config['achievements'][:])

        # now hit the pop bumper a bunch of times and make sure all the awards
        # are selected
        while achievements:
            self.hit_and_release_switch('upper_left_jet')
            achievements.discard(
                self.machine.achievement_groups.mansion_awards._selected_member)

        # let's shoot the electric chair to get the selected award
        selected_achievement = self.machine.achievement_groups.mansion_awards._selected_member
        self.hit_switch_and_run('electric_chair', 1)

        # that one should be complete now
        self.assertEqual(selected_achievement.state, 'completed')

        # new one should be selected
        new_selected_achievement = self.machine.achievement_groups.mansion_awards._selected_member
        self.assertNotEqual(selected_achievement, new_selected_achievement)

        self.assertMansionNotLit()

    def test_award_from_swamp(self):
        self._start_single_player_game(4)
        selected_achievement = self.machine.achievement_groups.mansion_awards._selected_member
        self.hit_switch_and_run('swamp_kickout', 1)

        # that one should be complete now
        self.assertEqual(selected_achievement.state, 'completed')

        # new one should be selected
        new_selected_achievement = self.machine.achievement_groups.mansion_awards._selected_member
        self.assertNotEqual(selected_achievement, new_selected_achievement)

        self.assertMansionNotLit()

    def _start_and_complete_first_award(self):
        self._start_single_player_game(4)
        selected_achievement = self.machine.achievement_groups.mansion_awards._selected_member
        self.hit_switch_and_run('electric_chair', 1)

        # that one should be complete now
        self.assertEqual(selected_achievement.state, 'completed')

        # new one should be selected
        new_selected_achievement = self.machine.achievement_groups.mansion_awards._selected_member
        self.assertNotEqual(selected_achievement, new_selected_achievement)

        self.assertMansionNotLit()

        # should be 10 achievements remaining "enabled"
        # (since there's 1 complete, 1 selected)
        enabled_achievements = [x for x in self.machine.achievements
                                if x.state == 'enabled']
        self.assertEqual(len(enabled_achievements), 10)

        return new_selected_achievement

    def assertMansionLit(self):
        self.assertTrue(self.machine.achievement_groups.mansion_awards.enabled)
        self.assertLightOn('electric_chair_red')
        self.assertLightOn('electric_chair_yellow')

    def assertMansionNotLit(self):
        self.assertFalse(self.machine.achievement_groups.mansion_awards.enabled)
        self.assertLightOff('electric_chair_red')
        self.assertLightOff('electric_chair_yellow')

    def test_relighting_from_ramp(self):
        selected = self._start_and_complete_first_award()

        # hit the chair, should not award
        self.hit_switch_and_run('electric_chair', 1)
        self.assertMansionNotLit()
        self.assertEqual(selected,
                         self.machine.achievement_groups.mansion_awards._selected_member)
        self.assertFalse(self.machine.achievement_groups.mansion_awards._enabled)

        # Still should be 10 enabled achievements
        enabled_achievements = [x for x in self.machine.achievements
                                if x.state == 'enabled']
        self.assertEqual(len(enabled_achievements), 10)

        # hit the ramp
        self.hit_and_release_switch('center_ramp')
        self.advance_time_and_run(.1)
        self.assertMansionLit()

        # 5 secs later, should still be lit
        self.advance_time_and_run(5)
        self.assertMansionLit()

        # hit the inlane
        self.hit_and_release_switch('right_inlane')
        self.advance_time_and_run(.1)
        self.assertMansionLit()

        # 5 secs later, should still be lit
        self.advance_time_and_run(5)
        self.assertMansionLit()

        # hit the swamp, should award
        self.hit_switch_and_run('swamp_kickout', 1)
        self.assertMansionNotLit()

        self.assertModeNotRunning('chair_lit')
        self.assertModeNotRunning('chair_lit_3s')

    def test_relighting_from_inlane(self):
        self._start_and_complete_first_award()

        for _ in range(3):  # do this 3 times just for yucks
            # hit the inlane
            self.hit_and_release_switch('right_inlane')
            self.advance_time_and_run(.1)
            self.assertMansionLit()

            # more than 3s, it unlights
            self.advance_time_and_run(3)
            self.assertMansionNotLit()

        # shoot the inlane, then within the 3 secs, shoot the ramp, make
        # sure the inlane timer doesn't kill the chair since the ramp should
        # keep it lit
        self.hit_and_release_switch('right_inlane')
        self.advance_time_and_run(.1)
        self.assertMansionLit()
        self.advance_time_and_run(2)

        # hit the ramp
        self.hit_and_release_switch('center_ramp')
        self.advance_time_and_run(.1)
        self.assertMansionLit()

        # 5 secs later, should still be lit
        self.advance_time_and_run(5)
        self.assertMansionLit()
mit
8,448,376,711,812,944,000
34.983264
108
0.654302
false
appsembler/edx-platform
common/djangoapps/track/views/tests/test_segmentio.py
14
19034
"""Ensure we can parse events sent to us from the Segment webhook integration""" from datetime import datetime import json from ddt import ddt, data, unpack from mock import sentinel from nose.plugins.attrib import attr from django.contrib.auth.models import User from django.test.utils import override_settings from openedx.core.lib.tests.assertions.events import assert_event_matches from track.middleware import TrackMiddleware from track.views import segmentio from track.views.tests.base import ( SegmentIOTrackingTestCaseBase, SEGMENTIO_TEST_ENDPOINT, SEGMENTIO_TEST_USER_ID ) def expect_failure_with_message(message): """Ensure the test raises an exception and does not emit an event""" def test_decorator(func): def test_decorated(self, *args, **kwargs): self.assertRaisesRegexp(segmentio.EventValidationError, message, func, self, *args, **kwargs) self.assert_no_events_emitted() return test_decorated return test_decorator @attr(shard=3) @ddt class SegmentIOTrackingTestCase(SegmentIOTrackingTestCaseBase): """ Test processing of Segment events. """ def test_get_request(self): request = self.request_factory.get(SEGMENTIO_TEST_ENDPOINT) response = segmentio.segmentio_event(request) self.assertEquals(response.status_code, 405) self.assert_no_events_emitted() @override_settings( TRACKING_SEGMENTIO_WEBHOOK_SECRET=None ) def test_no_secret_config(self): request = self.request_factory.post(SEGMENTIO_TEST_ENDPOINT) response = segmentio.segmentio_event(request) self.assertEquals(response.status_code, 401) self.assert_no_events_emitted() def test_no_secret_provided(self): request = self.request_factory.post(SEGMENTIO_TEST_ENDPOINT) response = segmentio.segmentio_event(request) self.assertEquals(response.status_code, 401) self.assert_no_events_emitted() def test_secret_mismatch(self): request = self.create_request(key='y') response = segmentio.segmentio_event(request) self.assertEquals(response.status_code, 401) self.assert_no_events_emitted() @data('identify', 'Group', 'Alias', 'Page', 'identify', 'screen') def test_segmentio_ignore_actions(self, action): self.post_segmentio_event(action=action) self.assert_no_events_emitted() def test_segmentio_ignore_unknown_libraries(self): self.post_segmentio_event(library_name='foo') self.assert_no_events_emitted() @expect_failure_with_message(segmentio.ERROR_USER_NOT_EXIST) def test_no_user_for_user_id(self): self.post_segmentio_event(user_id=40) @expect_failure_with_message(segmentio.ERROR_INVALID_USER_ID) def test_invalid_user_id(self): self.post_segmentio_event(user_id='foobar') @data('foo/bar/baz', 'course-v1:foo+bar+baz') def test_success(self, course_id): middleware = TrackMiddleware() request = self.create_request( data=self.create_segmentio_event_json(data={'foo': 'bar'}, course_id=course_id), content_type='application/json' ) User.objects.create(pk=SEGMENTIO_TEST_USER_ID, username=str(sentinel.username)) middleware.process_request(request) # The middleware normally emits an event, make sure it doesn't in this case. 
self.assert_no_events_emitted() try: response = segmentio.segmentio_event(request) self.assertEquals(response.status_code, 200) expected_event = { 'accept_language': '', 'referer': '', 'username': str(sentinel.username), 'ip': '', 'session': '', 'event_source': 'mobile', 'event_type': str(sentinel.name), 'name': str(sentinel.name), 'event': {'foo': 'bar'}, 'agent': str(sentinel.user_agent), 'page': None, 'time': datetime.strptime("2014-08-27T16:33:39.215Z", "%Y-%m-%dT%H:%M:%S.%fZ"), 'host': 'testserver', 'context': { 'application': { 'name': 'edx.mobile.android', 'version': '1.0.1', }, 'user_id': SEGMENTIO_TEST_USER_ID, 'course_id': course_id, 'org_id': u'foo', 'path': SEGMENTIO_TEST_ENDPOINT, 'client': { 'library': { 'name': 'test-app', 'version': 'unknown' }, 'app': { 'version': '1.0.1', }, }, 'received_at': datetime.strptime("2014-08-27T16:33:39.100Z", "%Y-%m-%dT%H:%M:%S.%fZ"), }, } finally: middleware.process_response(request, None) assert_event_matches(expected_event, self.get_event()) def test_invalid_course_id(self): request = self.create_request( data=self.create_segmentio_event_json(course_id='invalid'), content_type='application/json' ) User.objects.create(pk=SEGMENTIO_TEST_USER_ID, username=str(sentinel.username)) segmentio.track_segmentio_event(request) self.assert_events_emitted() @expect_failure_with_message(segmentio.ERROR_MISSING_NAME) def test_missing_name(self): sample_event_raw = self.create_segmentio_event() del sample_event_raw['properties']['name'] request = self.create_request( data=json.dumps(sample_event_raw), content_type='application/json' ) User.objects.create(pk=SEGMENTIO_TEST_USER_ID, username=str(sentinel.username)) segmentio.track_segmentio_event(request) @expect_failure_with_message(segmentio.ERROR_MISSING_DATA) def test_missing_data(self): sample_event_raw = self.create_segmentio_event() del sample_event_raw['properties']['data'] request = self.create_request( data=json.dumps(sample_event_raw), content_type='application/json' ) User.objects.create(pk=SEGMENTIO_TEST_USER_ID, username=str(sentinel.username)) segmentio.track_segmentio_event(request) @expect_failure_with_message(segmentio.ERROR_MISSING_TIMESTAMP) def test_missing_timestamp(self): sample_event_raw = self.create_event_without_fields('timestamp') request = self.create_request( data=json.dumps(sample_event_raw), content_type='application/json' ) User.objects.create(pk=SEGMENTIO_TEST_USER_ID, username=str(sentinel.username)) segmentio.track_segmentio_event(request) @expect_failure_with_message(segmentio.ERROR_MISSING_RECEIVED_AT) def test_missing_received_at(self): sample_event_raw = self.create_event_without_fields('receivedAt') request = self.create_request( data=json.dumps(sample_event_raw), content_type='application/json' ) User.objects.create(pk=SEGMENTIO_TEST_USER_ID, username=str(sentinel.username)) segmentio.track_segmentio_event(request) def create_event_without_fields(self, *fields): """Create a fake event and remove some fields from it""" event = self.create_segmentio_event() for field in fields: if field in event: del event[field] return event def test_string_user_id(self): User.objects.create(pk=SEGMENTIO_TEST_USER_ID, username=str(sentinel.username)) self.post_segmentio_event(user_id=str(SEGMENTIO_TEST_USER_ID)) self.assert_events_emitted() def test_hiding_failure(self): sample_event_raw = self.create_event_without_fields('timestamp') request = self.create_request( data=json.dumps(sample_event_raw), content_type='application/json' ) User.objects.create(pk=SEGMENTIO_TEST_USER_ID, 
username=str(sentinel.username)) response = segmentio.segmentio_event(request) self.assertEquals(response.status_code, 200) self.assert_no_events_emitted() @data( ('edx.video.played', 'play_video'), ('edx.video.paused', 'pause_video'), ('edx.video.stopped', 'stop_video'), ('edx.video.loaded', 'load_video'), ('edx.video.position.changed', 'seek_video'), ('edx.video.transcript.shown', 'show_transcript'), ('edx.video.transcript.hidden', 'hide_transcript'), ) @unpack def test_video_event(self, name, event_type): course_id = 'foo/bar/baz' middleware = TrackMiddleware() input_payload = { 'current_time': 132.134456, 'module_id': 'i4x://foo/bar/baz/some_module', 'code': 'mobile' } if name == 'edx.video.loaded': # We use the same expected payload for all of these types of events, but the load video event is the only # one that is not actually expected to contain a "current time" field. So we remove it from the expected # event here. del input_payload['current_time'] request = self.create_request( data=self.create_segmentio_event_json( name=name, data=input_payload, context={ 'open_in_browser_url': 'https://testserver/courses/foo/bar/baz/courseware/Week_1/Activity/2', 'course_id': course_id, 'application': { 'name': 'edx.mobileapp.android', 'version': '29', 'component': 'videoplayer' } }), content_type='application/json' ) User.objects.create(pk=SEGMENTIO_TEST_USER_ID, username=str(sentinel.username)) middleware.process_request(request) try: response = segmentio.segmentio_event(request) self.assertEquals(response.status_code, 200) expected_event = { 'accept_language': '', 'referer': '', 'username': str(sentinel.username), 'ip': '', 'session': '', 'event_source': 'mobile', 'event_type': event_type, 'name': name, 'agent': str(sentinel.user_agent), 'page': 'https://testserver/courses/foo/bar/baz/courseware/Week_1/Activity', 'time': datetime.strptime("2014-08-27T16:33:39.215Z", "%Y-%m-%dT%H:%M:%S.%fZ"), 'host': 'testserver', 'context': { 'user_id': SEGMENTIO_TEST_USER_ID, 'course_id': course_id, 'org_id': 'foo', 'path': SEGMENTIO_TEST_ENDPOINT, 'client': { 'library': { 'name': 'test-app', 'version': 'unknown' }, 'app': { 'version': '1.0.1', }, }, 'application': { 'name': 'edx.mobileapp.android', 'version': '29', 'component': 'videoplayer' }, 'received_at': datetime.strptime("2014-08-27T16:33:39.100Z", "%Y-%m-%dT%H:%M:%S.%fZ"), }, 'event': { 'currentTime': 132.134456, 'id': 'i4x-foo-bar-baz-some_module', 'code': 'mobile' } } if name == 'edx.video.loaded': # We use the same expected payload for all of these types of events, but the load video event is the # only one that is not actually expected to contain a "current time" field. So we remove it from the # expected event here. del expected_event['event']['currentTime'] finally: middleware.process_response(request, None) actual_event = self.get_event() assert_event_matches(expected_event, actual_event) @data( # Verify positive slide case. Verify slide to onSlideSeek. Verify # edx.video.seeked emitted from iOS v1.0.02 is changed to # edx.video.position.changed. (1, 1, "seek_type", "slide", "onSlideSeek", "edx.video.seeked", "edx.video.position.changed", 'edx.mobileapp.iOS', '1.0.02'), # Verify negative slide case. Verify slide to onSlideSeek. Verify # edx.video.seeked to edx.video.position.changed. (-2, -2, "seek_type", "slide", "onSlideSeek", "edx.video.seeked", "edx.video.position.changed", 'edx.mobileapp.iOS', '1.0.02'), # Verify +30 is changed to -30 which is incorrectly emitted in iOS # v1.0.02. 
Verify skip to onSkipSeek (30, -30, "seek_type", "skip", "onSkipSeek", "edx.video.position.changed", "edx.video.position.changed", 'edx.mobileapp.iOS', '1.0.02'), # Verify the correct case of -30 is also handled as well. Verify skip # to onSkipSeek (-30, -30, "seek_type", "skip", "onSkipSeek", "edx.video.position.changed", "edx.video.position.changed", 'edx.mobileapp.iOS', '1.0.02'), # Verify positive slide case where onSkipSeek is changed to # onSlideSkip. Verify edx.video.seeked emitted from Android v1.0.02 is # changed to edx.video.position.changed. (1, 1, "type", "onSkipSeek", "onSlideSeek", "edx.video.seeked", "edx.video.position.changed", 'edx.mobileapp.android', '1.0.02'), # Verify positive slide case where onSkipSeek is changed to # onSlideSkip. Verify edx.video.seeked emitted from Android v1.0.02 is # changed to edx.video.position.changed. (-2, -2, "type", "onSkipSeek", "onSlideSeek", "edx.video.seeked", "edx.video.position.changed", 'edx.mobileapp.android', '1.0.02'), # Verify positive skip case where onSkipSeek is not changed and does # not become negative. (30, 30, "type", "onSkipSeek", "onSkipSeek", "edx.video.position.changed", "edx.video.position.changed", 'edx.mobileapp.android', '1.0.02'), # Verify positive skip case where onSkipSeek is not changed. (-30, -30, "type", "onSkipSeek", "onSkipSeek", "edx.video.position.changed", "edx.video.position.changed", 'edx.mobileapp.android', '1.0.02') ) @unpack def test_previous_builds(self, requested_skip_interval, expected_skip_interval, seek_type_key, seek_type, expected_seek_type, name, expected_name, platform, version, ): """ Test backwards compatibility of previous app builds iOS version 1.0.02: Incorrectly emits the skip back 30 seconds as +30 instead of -30. Android version 1.0.02: Skip and slide were both being returned as a skip. 
Skip or slide is determined by checking if the skip time is == -30 Additionally, for both of the above mentioned versions, edx.video.seeked was sent instead of edx.video.position.changed """ course_id = 'foo/bar/baz' middleware = TrackMiddleware() input_payload = { "code": "mobile", "new_time": 89.699177437, "old_time": 119.699177437, seek_type_key: seek_type, "requested_skip_interval": requested_skip_interval, 'module_id': 'i4x://foo/bar/baz/some_module', } request = self.create_request( data=self.create_segmentio_event_json( name=name, data=input_payload, context={ 'open_in_browser_url': 'https://testserver/courses/foo/bar/baz/courseware/Week_1/Activity/2', 'course_id': course_id, 'application': { 'name': platform, 'version': version, 'component': 'videoplayer' } }, ), content_type='application/json' ) User.objects.create(pk=SEGMENTIO_TEST_USER_ID, username=str(sentinel.username)) middleware.process_request(request) try: response = segmentio.segmentio_event(request) self.assertEquals(response.status_code, 200) expected_event = { 'accept_language': '', 'referer': '', 'username': str(sentinel.username), 'ip': '', 'session': '', 'event_source': 'mobile', 'event_type': "seek_video", 'name': expected_name, 'agent': str(sentinel.user_agent), 'page': 'https://testserver/courses/foo/bar/baz/courseware/Week_1/Activity', 'time': datetime.strptime("2014-08-27T16:33:39.215Z", "%Y-%m-%dT%H:%M:%S.%fZ"), 'host': 'testserver', 'context': { 'user_id': SEGMENTIO_TEST_USER_ID, 'course_id': course_id, 'org_id': 'foo', 'path': SEGMENTIO_TEST_ENDPOINT, 'client': { 'library': { 'name': 'test-app', 'version': 'unknown' }, 'app': { 'version': '1.0.1', }, }, 'application': { 'name': platform, 'version': version, 'component': 'videoplayer' }, 'received_at': datetime.strptime("2014-08-27T16:33:39.100Z", "%Y-%m-%dT%H:%M:%S.%fZ"), }, 'event': { "code": "mobile", "new_time": 89.699177437, "old_time": 119.699177437, "type": expected_seek_type, "requested_skip_interval": expected_skip_interval, 'id': 'i4x-foo-bar-baz-some_module', } } finally: middleware.process_response(request, None) actual_event = self.get_event() assert_event_matches(expected_event, actual_event)
agpl-3.0
3,170,615,436,583,846,400
41.203991
149
0.545918
false
alex/sqlalchemy
test/ext/declarative/test_mixin.py
3
39191
from sqlalchemy.testing import eq_, assert_raises, \ assert_raises_message, is_ from sqlalchemy.ext import declarative as decl import sqlalchemy as sa from sqlalchemy import testing from sqlalchemy import Integer, String, ForeignKey from sqlalchemy.testing.schema import Table, Column from sqlalchemy.orm import relationship, create_session, class_mapper, \ configure_mappers, clear_mappers, \ deferred, column_property, \ Session from sqlalchemy.util import classproperty from sqlalchemy.ext.declarative import declared_attr from sqlalchemy.testing import fixtures Base = None class DeclarativeTestBase(fixtures.TestBase, testing.AssertsExecutionResults): def setup(self): global Base Base = decl.declarative_base(testing.db) def teardown(self): Session.close_all() clear_mappers() Base.metadata.drop_all() class DeclarativeMixinTest(DeclarativeTestBase): def test_simple(self): class MyMixin(object): id = Column(Integer, primary_key=True, test_needs_autoincrement=True) def foo(self): return 'bar' + str(self.id) class MyModel(Base, MyMixin): __tablename__ = 'test' name = Column(String(100), nullable=False, index=True) Base.metadata.create_all() session = create_session() session.add(MyModel(name='testing')) session.flush() session.expunge_all() obj = session.query(MyModel).one() eq_(obj.id, 1) eq_(obj.name, 'testing') eq_(obj.foo(), 'bar1') def test_unique_column(self): class MyMixin(object): id = Column(Integer, primary_key=True) value = Column(String, unique=True) class MyModel(Base, MyMixin): __tablename__ = 'test' assert MyModel.__table__.c.value.unique def test_hierarchical_bases(self): class MyMixinParent: id = Column(Integer, primary_key=True, test_needs_autoincrement=True) def foo(self): return 'bar' + str(self.id) class MyMixin(MyMixinParent): baz = Column(String(100), nullable=False, index=True) class MyModel(Base, MyMixin): __tablename__ = 'test' name = Column(String(100), nullable=False, index=True) Base.metadata.create_all() session = create_session() session.add(MyModel(name='testing', baz='fu')) session.flush() session.expunge_all() obj = session.query(MyModel).one() eq_(obj.id, 1) eq_(obj.name, 'testing') eq_(obj.foo(), 'bar1') eq_(obj.baz, 'fu') def test_mixin_overrides(self): """test a mixin that overrides a column on a superclass.""" class MixinA(object): foo = Column(String(50)) class MixinB(MixinA): foo = Column(Integer) class MyModelA(Base, MixinA): __tablename__ = 'testa' id = Column(Integer, primary_key=True) class MyModelB(Base, MixinB): __tablename__ = 'testb' id = Column(Integer, primary_key=True) eq_(MyModelA.__table__.c.foo.type.__class__, String) eq_(MyModelB.__table__.c.foo.type.__class__, Integer) def test_not_allowed(self): class MyMixin: foo = Column(Integer, ForeignKey('bar.id')) def go(): class MyModel(Base, MyMixin): __tablename__ = 'foo' assert_raises(sa.exc.InvalidRequestError, go) class MyRelMixin: foo = relationship('Bar') def go(): class MyModel(Base, MyRelMixin): __tablename__ = 'foo' assert_raises(sa.exc.InvalidRequestError, go) class MyDefMixin: foo = deferred(Column('foo', String)) def go(): class MyModel(Base, MyDefMixin): __tablename__ = 'foo' assert_raises(sa.exc.InvalidRequestError, go) class MyCPropMixin: foo = column_property(Column('foo', String)) def go(): class MyModel(Base, MyCPropMixin): __tablename__ = 'foo' assert_raises(sa.exc.InvalidRequestError, go) def test_table_name_inherited(self): class MyMixin: @declared_attr def __tablename__(cls): return cls.__name__.lower() id = Column(Integer, primary_key=True) class MyModel(Base, MyMixin): pass 
eq_(MyModel.__table__.name, 'mymodel') def test_classproperty_still_works(self): class MyMixin(object): @classproperty def __tablename__(cls): return cls.__name__.lower() id = Column(Integer, primary_key=True) class MyModel(Base, MyMixin): __tablename__ = 'overridden' eq_(MyModel.__table__.name, 'overridden') def test_table_name_not_inherited(self): class MyMixin: @declared_attr def __tablename__(cls): return cls.__name__.lower() id = Column(Integer, primary_key=True) class MyModel(Base, MyMixin): __tablename__ = 'overridden' eq_(MyModel.__table__.name, 'overridden') def test_table_name_inheritance_order(self): class MyMixin1: @declared_attr def __tablename__(cls): return cls.__name__.lower() + '1' class MyMixin2: @declared_attr def __tablename__(cls): return cls.__name__.lower() + '2' class MyModel(Base, MyMixin1, MyMixin2): id = Column(Integer, primary_key=True) eq_(MyModel.__table__.name, 'mymodel1') def test_table_name_dependent_on_subclass(self): class MyHistoryMixin: @declared_attr def __tablename__(cls): return cls.parent_name + '_changelog' class MyModel(Base, MyHistoryMixin): parent_name = 'foo' id = Column(Integer, primary_key=True) eq_(MyModel.__table__.name, 'foo_changelog') def test_table_args_inherited(self): class MyMixin: __table_args__ = {'mysql_engine': 'InnoDB'} class MyModel(Base, MyMixin): __tablename__ = 'test' id = Column(Integer, primary_key=True) eq_(MyModel.__table__.kwargs, {'mysql_engine': 'InnoDB'}) def test_table_args_inherited_descriptor(self): class MyMixin: @declared_attr def __table_args__(cls): return {'info': cls.__name__} class MyModel(Base, MyMixin): __tablename__ = 'test' id = Column(Integer, primary_key=True) eq_(MyModel.__table__.info, 'MyModel') def test_table_args_inherited_single_table_inheritance(self): class MyMixin: __table_args__ = {'mysql_engine': 'InnoDB'} class General(Base, MyMixin): __tablename__ = 'test' id = Column(Integer, primary_key=True) type_ = Column(String(50)) __mapper__args = {'polymorphic_on': type_} class Specific(General): __mapper_args__ = {'polymorphic_identity': 'specific'} assert Specific.__table__ is General.__table__ eq_(General.__table__.kwargs, {'mysql_engine': 'InnoDB'}) def test_columns_single_table_inheritance(self): """Test a column on a mixin with an alternate attribute name, mapped to a superclass and single-table inheritance subclass. The superclass table gets the column, the subclass shares the MapperProperty. """ class MyMixin(object): foo = Column('foo', Integer) bar = Column('bar_newname', Integer) class General(Base, MyMixin): __tablename__ = 'test' id = Column(Integer, primary_key=True) type_ = Column(String(50)) __mapper__args = {'polymorphic_on': type_} class Specific(General): __mapper_args__ = {'polymorphic_identity': 'specific'} assert General.bar.prop.columns[0] is General.__table__.c.bar_newname assert len(General.bar.prop.columns) == 1 assert Specific.bar.prop is General.bar.prop @testing.skip_if(lambda: testing.against('oracle'), "Test has an empty insert in it at the moment") def test_columns_single_inheritance_conflict_resolution(self): """Test that a declared_attr can return the existing column and it will be ignored. this allows conditional columns to be added. See [ticket:2472]. 
""" class Person(Base): __tablename__ = 'person' id = Column(Integer, primary_key=True) class Mixin(object): @declared_attr def target_id(cls): return cls.__table__.c.get('target_id', Column(Integer, ForeignKey('other.id')) ) @declared_attr def target(cls): return relationship("Other") class Engineer(Mixin, Person): """single table inheritance""" class Manager(Mixin, Person): """single table inheritance""" class Other(Base): __tablename__ = 'other' id = Column(Integer, primary_key=True) is_( Engineer.target_id.property.columns[0], Person.__table__.c.target_id ) is_( Manager.target_id.property.columns[0], Person.__table__.c.target_id ) # do a brief round trip on this Base.metadata.create_all() session = Session() o1, o2 = Other(), Other() session.add_all([ Engineer(target=o1), Manager(target=o2), Manager(target=o1) ]) session.commit() eq_(session.query(Engineer).first().target, o1) def test_columns_joined_table_inheritance(self): """Test a column on a mixin with an alternate attribute name, mapped to a superclass and joined-table inheritance subclass. Both tables get the column, in the case of the subclass the two columns are joined under one MapperProperty. """ class MyMixin(object): foo = Column('foo', Integer) bar = Column('bar_newname', Integer) class General(Base, MyMixin): __tablename__ = 'test' id = Column(Integer, primary_key=True) type_ = Column(String(50)) __mapper__args = {'polymorphic_on': type_} class Specific(General): __tablename__ = 'sub' id = Column(Integer, ForeignKey('test.id'), primary_key=True) __mapper_args__ = {'polymorphic_identity': 'specific'} assert General.bar.prop.columns[0] is General.__table__.c.bar_newname assert len(General.bar.prop.columns) == 1 assert Specific.bar.prop is General.bar.prop eq_(len(Specific.bar.prop.columns), 1) assert Specific.bar.prop.columns[0] is General.__table__.c.bar_newname def test_column_join_checks_superclass_type(self): """Test that the logic which joins subclass props to those of the superclass checks that the superclass property is a column. """ class General(Base): __tablename__ = 'test' id = Column(Integer, primary_key=True) general_id = Column(Integer, ForeignKey('test.id')) type_ = relationship("General") class Specific(General): __tablename__ = 'sub' id = Column(Integer, ForeignKey('test.id'), primary_key=True) type_ = Column('foob', String(50)) assert isinstance(General.type_.property, sa.orm.RelationshipProperty) assert Specific.type_.property.columns[0] is Specific.__table__.c.foob def test_column_join_checks_subclass_type(self): """Test that the logic which joins subclass props to those of the superclass checks that the subclass property is a column. 
""" def go(): class General(Base): __tablename__ = 'test' id = Column(Integer, primary_key=True) type_ = Column('foob', Integer) class Specific(General): __tablename__ = 'sub' id = Column(Integer, ForeignKey('test.id'), primary_key=True) specific_id = Column(Integer, ForeignKey('sub.id')) type_ = relationship("Specific") assert_raises_message( sa.exc.ArgumentError, "column 'foob' conflicts with property", go ) def test_table_args_overridden(self): class MyMixin: __table_args__ = {'mysql_engine': 'Foo'} class MyModel(Base, MyMixin): __tablename__ = 'test' __table_args__ = {'mysql_engine': 'InnoDB'} id = Column(Integer, primary_key=True) eq_(MyModel.__table__.kwargs, {'mysql_engine': 'InnoDB'}) def test_mapper_args_declared_attr(self): class ComputedMapperArgs: @declared_attr def __mapper_args__(cls): if cls.__name__ == 'Person': return {'polymorphic_on': cls.discriminator} else: return {'polymorphic_identity': cls.__name__} class Person(Base, ComputedMapperArgs): __tablename__ = 'people' id = Column(Integer, primary_key=True) discriminator = Column('type', String(50)) class Engineer(Person): pass configure_mappers() assert class_mapper(Person).polymorphic_on \ is Person.__table__.c.type eq_(class_mapper(Engineer).polymorphic_identity, 'Engineer') def test_mapper_args_declared_attr_two(self): # same as test_mapper_args_declared_attr, but we repeat # ComputedMapperArgs on both classes for no apparent reason. class ComputedMapperArgs: @declared_attr def __mapper_args__(cls): if cls.__name__ == 'Person': return {'polymorphic_on': cls.discriminator} else: return {'polymorphic_identity': cls.__name__} class Person(Base, ComputedMapperArgs): __tablename__ = 'people' id = Column(Integer, primary_key=True) discriminator = Column('type', String(50)) class Engineer(Person, ComputedMapperArgs): pass configure_mappers() assert class_mapper(Person).polymorphic_on \ is Person.__table__.c.type eq_(class_mapper(Engineer).polymorphic_identity, 'Engineer') def test_table_args_composite(self): class MyMixin1: __table_args__ = {'info': {'baz': 'bob'}} class MyMixin2: __table_args__ = {'info': {'foo': 'bar'}} class MyModel(Base, MyMixin1, MyMixin2): __tablename__ = 'test' @declared_attr def __table_args__(self): info = {} args = dict(info=info) info.update(MyMixin1.__table_args__['info']) info.update(MyMixin2.__table_args__['info']) return args id = Column(Integer, primary_key=True) eq_(MyModel.__table__.info, {'foo': 'bar', 'baz': 'bob'}) def test_mapper_args_inherited(self): class MyMixin: __mapper_args__ = {'always_refresh': True} class MyModel(Base, MyMixin): __tablename__ = 'test' id = Column(Integer, primary_key=True) eq_(MyModel.__mapper__.always_refresh, True) def test_mapper_args_inherited_descriptor(self): class MyMixin: @declared_attr def __mapper_args__(cls): # tenuous, but illustrates the problem! 
if cls.__name__ == 'MyModel': return dict(always_refresh=True) else: return dict(always_refresh=False) class MyModel(Base, MyMixin): __tablename__ = 'test' id = Column(Integer, primary_key=True) eq_(MyModel.__mapper__.always_refresh, True) def test_mapper_args_polymorphic_on_inherited(self): class MyMixin: type_ = Column(String(50)) __mapper_args__ = {'polymorphic_on': type_} class MyModel(Base, MyMixin): __tablename__ = 'test' id = Column(Integer, primary_key=True) col = MyModel.__mapper__.polymorphic_on eq_(col.name, 'type_') assert col.table is not None def test_mapper_args_overridden(self): class MyMixin: __mapper_args__ = dict(always_refresh=True) class MyModel(Base, MyMixin): __tablename__ = 'test' __mapper_args__ = dict(always_refresh=False) id = Column(Integer, primary_key=True) eq_(MyModel.__mapper__.always_refresh, False) def test_mapper_args_composite(self): class MyMixin1: type_ = Column(String(50)) __mapper_args__ = {'polymorphic_on': type_} class MyMixin2: __mapper_args__ = {'always_refresh': True} class MyModel(Base, MyMixin1, MyMixin2): __tablename__ = 'test' @declared_attr def __mapper_args__(cls): args = {} args.update(MyMixin1.__mapper_args__) args.update(MyMixin2.__mapper_args__) if cls.__name__ != 'MyModel': args.pop('polymorphic_on') args['polymorphic_identity'] = cls.__name__ return args id = Column(Integer, primary_key=True) class MySubModel(MyModel): pass eq_( MyModel.__mapper__.polymorphic_on.name, 'type_' ) assert MyModel.__mapper__.polymorphic_on.table is not None eq_(MyModel.__mapper__.always_refresh, True) eq_(MySubModel.__mapper__.always_refresh, True) eq_(MySubModel.__mapper__.polymorphic_identity, 'MySubModel') def test_mapper_args_property(self): class MyModel(Base): @declared_attr def __tablename__(cls): return cls.__name__.lower() @declared_attr def __table_args__(cls): return {'mysql_engine':'InnoDB'} @declared_attr def __mapper_args__(cls): args = {} args['polymorphic_identity'] = cls.__name__ return args id = Column(Integer, primary_key=True) class MySubModel(MyModel): id = Column(Integer, ForeignKey('mymodel.id'), primary_key=True) class MySubModel2(MyModel): __tablename__ = 'sometable' id = Column(Integer, ForeignKey('mymodel.id'), primary_key=True) eq_(MyModel.__mapper__.polymorphic_identity, 'MyModel') eq_(MySubModel.__mapper__.polymorphic_identity, 'MySubModel') eq_(MyModel.__table__.kwargs['mysql_engine'], 'InnoDB') eq_(MySubModel.__table__.kwargs['mysql_engine'], 'InnoDB') eq_(MySubModel2.__table__.kwargs['mysql_engine'], 'InnoDB') eq_(MyModel.__table__.name, 'mymodel') eq_(MySubModel.__table__.name, 'mysubmodel') def test_mapper_args_custom_base(self): """test the @declared_attr approach from a custom base.""" class Base(object): @declared_attr def __tablename__(cls): return cls.__name__.lower() @declared_attr def __table_args__(cls): return {'mysql_engine':'InnoDB'} @declared_attr def id(self): return Column(Integer, primary_key=True) Base = decl.declarative_base(cls=Base) class MyClass(Base): pass class MyOtherClass(Base): pass eq_(MyClass.__table__.kwargs['mysql_engine'], 'InnoDB') eq_(MyClass.__table__.name, 'myclass') eq_(MyOtherClass.__table__.name, 'myotherclass') assert MyClass.__table__.c.id.table is MyClass.__table__ assert MyOtherClass.__table__.c.id.table is MyOtherClass.__table__ def test_single_table_no_propagation(self): class IdColumn: id = Column(Integer, primary_key=True) class Generic(Base, IdColumn): __tablename__ = 'base' discriminator = Column('type', String(50)) __mapper_args__ = dict(polymorphic_on=discriminator) value 
= Column(Integer()) class Specific(Generic): __mapper_args__ = dict(polymorphic_identity='specific') assert Specific.__table__ is Generic.__table__ eq_(list(Generic.__table__.c.keys()), ['id', 'type', 'value']) assert class_mapper(Specific).polymorphic_on \ is Generic.__table__.c.type eq_(class_mapper(Specific).polymorphic_identity, 'specific') def test_joined_table_propagation(self): class CommonMixin: @declared_attr def __tablename__(cls): return cls.__name__.lower() __table_args__ = {'mysql_engine': 'InnoDB'} timestamp = Column(Integer) id = Column(Integer, primary_key=True) class Generic(Base, CommonMixin): discriminator = Column('python_type', String(50)) __mapper_args__ = dict(polymorphic_on=discriminator) class Specific(Generic): __mapper_args__ = dict(polymorphic_identity='specific') id = Column(Integer, ForeignKey('generic.id'), primary_key=True) eq_(Generic.__table__.name, 'generic') eq_(Specific.__table__.name, 'specific') eq_(list(Generic.__table__.c.keys()), ['timestamp', 'id', 'python_type']) eq_(list(Specific.__table__.c.keys()), ['id']) eq_(Generic.__table__.kwargs, {'mysql_engine': 'InnoDB'}) eq_(Specific.__table__.kwargs, {'mysql_engine': 'InnoDB'}) def test_some_propagation(self): class CommonMixin: @declared_attr def __tablename__(cls): return cls.__name__.lower() __table_args__ = {'mysql_engine': 'InnoDB'} timestamp = Column(Integer) class BaseType(Base, CommonMixin): discriminator = Column('type', String(50)) __mapper_args__ = dict(polymorphic_on=discriminator) id = Column(Integer, primary_key=True) value = Column(Integer()) class Single(BaseType): __tablename__ = None __mapper_args__ = dict(polymorphic_identity='type1') class Joined(BaseType): __mapper_args__ = dict(polymorphic_identity='type2') id = Column(Integer, ForeignKey('basetype.id'), primary_key=True) eq_(BaseType.__table__.name, 'basetype') eq_(list(BaseType.__table__.c.keys()), ['timestamp', 'type', 'id', 'value']) eq_(BaseType.__table__.kwargs, {'mysql_engine': 'InnoDB'}) assert Single.__table__ is BaseType.__table__ eq_(Joined.__table__.name, 'joined') eq_(list(Joined.__table__.c.keys()), ['id']) eq_(Joined.__table__.kwargs, {'mysql_engine': 'InnoDB'}) def test_col_copy_vs_declared_attr_joined_propagation(self): class Mixin(object): a = Column(Integer) @declared_attr def b(cls): return Column(Integer) class A(Mixin, Base): __tablename__ = 'a' id = Column(Integer, primary_key=True) class B(A): __tablename__ = 'b' id = Column(Integer, ForeignKey('a.id'), primary_key=True) assert 'a' in A.__table__.c assert 'b' in A.__table__.c assert 'a' not in B.__table__.c assert 'b' not in B.__table__.c def test_col_copy_vs_declared_attr_joined_propagation_newname(self): class Mixin(object): a = Column('a1', Integer) @declared_attr def b(cls): return Column('b1', Integer) class A(Mixin, Base): __tablename__ = 'a' id = Column(Integer, primary_key=True) class B(A): __tablename__ = 'b' id = Column(Integer, ForeignKey('a.id'), primary_key=True) assert 'a1' in A.__table__.c assert 'b1' in A.__table__.c assert 'a1' not in B.__table__.c assert 'b1' not in B.__table__.c def test_col_copy_vs_declared_attr_single_propagation(self): class Mixin(object): a = Column(Integer) @declared_attr def b(cls): return Column(Integer) class A(Mixin, Base): __tablename__ = 'a' id = Column(Integer, primary_key=True) class B(A): pass assert 'a' in A.__table__.c assert 'b' in A.__table__.c def test_non_propagating_mixin(self): class NoJoinedTableNameMixin: @declared_attr def __tablename__(cls): if decl.has_inherited_table(cls): return None return 
cls.__name__.lower() class BaseType(Base, NoJoinedTableNameMixin): discriminator = Column('type', String(50)) __mapper_args__ = dict(polymorphic_on=discriminator) id = Column(Integer, primary_key=True) value = Column(Integer()) class Specific(BaseType): __mapper_args__ = dict(polymorphic_identity='specific') eq_(BaseType.__table__.name, 'basetype') eq_(list(BaseType.__table__.c.keys()), ['type', 'id', 'value']) assert Specific.__table__ is BaseType.__table__ assert class_mapper(Specific).polymorphic_on \ is BaseType.__table__.c.type eq_(class_mapper(Specific).polymorphic_identity, 'specific') def test_non_propagating_mixin_used_for_joined(self): class TableNameMixin: @declared_attr def __tablename__(cls): if decl.has_inherited_table(cls) and TableNameMixin \ not in cls.__bases__: return None return cls.__name__.lower() class BaseType(Base, TableNameMixin): discriminator = Column('type', String(50)) __mapper_args__ = dict(polymorphic_on=discriminator) id = Column(Integer, primary_key=True) value = Column(Integer()) class Specific(BaseType, TableNameMixin): __mapper_args__ = dict(polymorphic_identity='specific') id = Column(Integer, ForeignKey('basetype.id'), primary_key=True) eq_(BaseType.__table__.name, 'basetype') eq_(list(BaseType.__table__.c.keys()), ['type', 'id', 'value']) eq_(Specific.__table__.name, 'specific') eq_(list(Specific.__table__.c.keys()), ['id']) def test_single_back_propagate(self): class ColumnMixin: timestamp = Column(Integer) class BaseType(Base): __tablename__ = 'foo' discriminator = Column('type', String(50)) __mapper_args__ = dict(polymorphic_on=discriminator) id = Column(Integer, primary_key=True) class Specific(BaseType, ColumnMixin): __mapper_args__ = dict(polymorphic_identity='specific') eq_(list(BaseType.__table__.c.keys()), ['type', 'id', 'timestamp']) def test_table_in_model_and_same_column_in_mixin(self): class ColumnMixin: data = Column(Integer) class Model(Base, ColumnMixin): __table__ = Table('foo', Base.metadata, Column('data', Integer), Column('id', Integer, primary_key=True)) model_col = Model.__table__.c.data mixin_col = ColumnMixin.data assert model_col is not mixin_col eq_(model_col.name, 'data') assert model_col.type.__class__ is mixin_col.type.__class__ def test_table_in_model_and_different_named_column_in_mixin(self): class ColumnMixin: tada = Column(Integer) def go(): class Model(Base, ColumnMixin): __table__ = Table('foo', Base.metadata, Column('data',Integer), Column('id', Integer,primary_key=True)) foo = relationship("Dest") assert_raises_message(sa.exc.ArgumentError, "Can't add additional column 'tada' when " "specifying __table__", go) def test_table_in_model_and_different_named_alt_key_column_in_mixin(self): # here, the __table__ has a column 'tada'. We disallow # the add of the 'foobar' column, even though it's # keyed to 'tada'. 
class ColumnMixin: tada = Column('foobar', Integer) def go(): class Model(Base, ColumnMixin): __table__ = Table('foo', Base.metadata, Column('data',Integer), Column('tada', Integer), Column('id', Integer,primary_key=True)) foo = relationship("Dest") assert_raises_message(sa.exc.ArgumentError, "Can't add additional column 'foobar' when " "specifying __table__", go) def test_table_in_model_overrides_different_typed_column_in_mixin(self): class ColumnMixin: data = Column(String) class Model(Base, ColumnMixin): __table__ = Table('foo', Base.metadata, Column('data', Integer), Column('id', Integer, primary_key=True)) model_col = Model.__table__.c.data mixin_col = ColumnMixin.data assert model_col is not mixin_col eq_(model_col.name, 'data') assert model_col.type.__class__ is Integer def test_mixin_column_ordering(self): class Foo(object): col1 = Column(Integer) col3 = Column(Integer) class Bar(object): col2 = Column(Integer) col4 = Column(Integer) class Model(Base, Foo, Bar): id = Column(Integer, primary_key=True) __tablename__ = 'model' eq_(list(Model.__table__.c.keys()), ['col1', 'col3', 'col2', 'col4', 'id']) def test_honor_class_mro_one(self): class HasXMixin(object): @declared_attr def x(self): return Column(Integer) class Parent(HasXMixin, Base): __tablename__ = 'parent' id = Column(Integer, primary_key=True) class Child(Parent): __tablename__ = 'child' id = Column(Integer, ForeignKey('parent.id'), primary_key=True) assert "x" not in Child.__table__.c def test_honor_class_mro_two(self): class HasXMixin(object): @declared_attr def x(self): return Column(Integer) class Parent(HasXMixin, Base): __tablename__ = 'parent' id = Column(Integer, primary_key=True) def x(self): return "hi" class C(Parent): __tablename__ = 'c' id = Column(Integer, ForeignKey('parent.id'), primary_key=True) assert C().x() == 'hi' def test_arbitrary_attrs_one(self): class HasMixin(object): @declared_attr def some_attr(cls): return cls.__name__ + "SOME ATTR" class Mapped(HasMixin, Base): __tablename__ = 't' id = Column(Integer, primary_key=True) eq_(Mapped.some_attr, "MappedSOME ATTR") eq_(Mapped.__dict__['some_attr'], "MappedSOME ATTR") def test_arbitrary_attrs_two(self): from sqlalchemy.ext.associationproxy import association_proxy class FilterA(Base): __tablename__ = 'filter_a' id = Column(Integer(), primary_key=True) parent_id = Column(Integer(), ForeignKey('type_a.id')) filter = Column(String()) def __init__(self, filter_, **kw): self.filter = filter_ class FilterB(Base): __tablename__ = 'filter_b' id = Column(Integer(), primary_key=True) parent_id = Column(Integer(), ForeignKey('type_b.id')) filter = Column(String()) def __init__(self, filter_, **kw): self.filter = filter_ class FilterMixin(object): @declared_attr def _filters(cls): return relationship(cls.filter_class, cascade='all,delete,delete-orphan') @declared_attr def filters(cls): return association_proxy('_filters', 'filter') class TypeA(Base, FilterMixin): __tablename__ = 'type_a' filter_class = FilterA id = Column(Integer(), primary_key=True) class TypeB(Base, FilterMixin): __tablename__ = 'type_b' filter_class = FilterB id = Column(Integer(), primary_key=True) TypeA(filters=['foo']) TypeB(filters=['foo']) class DeclarativeMixinPropertyTest(DeclarativeTestBase): def test_column_property(self): class MyMixin(object): @declared_attr def prop_hoho(cls): return column_property(Column('prop', String(50))) class MyModel(Base, MyMixin): __tablename__ = 'test' id = Column(Integer, primary_key=True, test_needs_autoincrement=True) class MyOtherModel(Base, MyMixin): 
__tablename__ = 'othertest' id = Column(Integer, primary_key=True, test_needs_autoincrement=True) assert MyModel.__table__.c.prop is not None assert MyOtherModel.__table__.c.prop is not None assert MyModel.__table__.c.prop \ is not MyOtherModel.__table__.c.prop assert MyModel.prop_hoho.property.columns \ == [MyModel.__table__.c.prop] assert MyOtherModel.prop_hoho.property.columns \ == [MyOtherModel.__table__.c.prop] assert MyModel.prop_hoho.property \ is not MyOtherModel.prop_hoho.property Base.metadata.create_all() sess = create_session() m1, m2 = MyModel(prop_hoho='foo'), MyOtherModel(prop_hoho='bar') sess.add_all([m1, m2]) sess.flush() eq_(sess.query(MyModel).filter(MyModel.prop_hoho == 'foo' ).one(), m1) eq_(sess.query(MyOtherModel).filter(MyOtherModel.prop_hoho == 'bar').one(), m2) def test_doc(self): """test documentation transfer. the documentation situation with @declared_attr is problematic. at least see if mapped subclasses get the doc. """ class MyMixin(object): @declared_attr def type_(cls): """this is a document.""" return Column(String(50)) @declared_attr def t2(cls): """this is another document.""" return column_property(Column(String(50))) class MyModel(Base, MyMixin): __tablename__ = 'test' id = Column(Integer, primary_key=True) configure_mappers() eq_(MyModel.type_.__doc__, """this is a document.""") eq_(MyModel.t2.__doc__, """this is another document.""") def test_column_in_mapper_args(self): class MyMixin(object): @declared_attr def type_(cls): return Column(String(50)) __mapper_args__ = {'polymorphic_on': type_} class MyModel(Base, MyMixin): __tablename__ = 'test' id = Column(Integer, primary_key=True) configure_mappers() col = MyModel.__mapper__.polymorphic_on eq_(col.name, 'type_') assert col.table is not None def test_deferred(self): class MyMixin(object): @declared_attr def data(cls): return deferred(Column('data', String(50))) class MyModel(Base, MyMixin): __tablename__ = 'test' id = Column(Integer, primary_key=True, test_needs_autoincrement=True) Base.metadata.create_all() sess = create_session() sess.add_all([MyModel(data='d1'), MyModel(data='d2')]) sess.flush() sess.expunge_all() d1, d2 = sess.query(MyModel).order_by(MyModel.data) assert 'data' not in d1.__dict__ assert d1.data == 'd1' assert 'data' in d1.__dict__ def _test_relationship(self, usestring): class RefTargetMixin(object): @declared_attr def target_id(cls): return Column('target_id', ForeignKey('target.id')) if usestring: @declared_attr def target(cls): return relationship('Target', primaryjoin='Target.id==%s.target_id' % cls.__name__) else: @declared_attr def target(cls): return relationship('Target') class Foo(Base, RefTargetMixin): __tablename__ = 'foo' id = Column(Integer, primary_key=True, test_needs_autoincrement=True) class Bar(Base, RefTargetMixin): __tablename__ = 'bar' id = Column(Integer, primary_key=True, test_needs_autoincrement=True) class Target(Base): __tablename__ = 'target' id = Column(Integer, primary_key=True, test_needs_autoincrement=True) Base.metadata.create_all() sess = create_session() t1, t2 = Target(), Target() f1, f2, b1 = Foo(target=t1), Foo(target=t2), Bar(target=t1) sess.add_all([f1, f2, b1]) sess.flush() eq_(sess.query(Foo).filter(Foo.target == t2).one(), f2) eq_(sess.query(Bar).filter(Bar.target == t2).first(), None) sess.expire_all() eq_(f1.target, t1) def test_relationship(self): self._test_relationship(False) def test_relationship_primryjoin(self): self._test_relationship(True)
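# ---------------------------------------------------------------------------
# Editor's note (not part of the original SQLAlchemy test suite above): a
# condensed, self-contained illustration of the declared_attr mixin pattern
# those tests exercise at length. The class and attribute names here are
# invented for the example; it relies only on the public declarative API the
# file itself imports.
from sqlalchemy import Column, Integer
from sqlalchemy.ext.declarative import declarative_base, declared_attr

ExampleBase = declarative_base()

class TablenameFromClassName(object):
    # evaluated once per mapped subclass, so every class derives its own name
    @declared_attr
    def __tablename__(cls):
        return cls.__name__.lower()

    # plain mixin columns are copied onto each mapped subclass
    id = Column(Integer, primary_key=True)

class Widget(TablenameFromClassName, ExampleBase):
    pass

class Gadget(TablenameFromClassName, ExampleBase):
    pass

assert Widget.__table__.name == 'widget'
assert Gadget.__table__.name == 'gadget'
assert Widget.__table__ is not Gadget.__table__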
mit
375,318,797,681,062,500
30.252791
79
0.538593
false
Ecpy/ecpy
exopy/measurement/editors/database_access_editor/editor_model.py
1
12086
# -*- coding: utf-8 -*- # ----------------------------------------------------------------------------- # Copyright 2015-2018 by Exopy Authors, see AUTHORS for more details. # # Distributed under the terms of the BSD license. # # The full license is in the file LICENCE, distributed with this software. # ----------------------------------------------------------------------------- """Model driving the database exception editor. """ from atom.api import Atom, Typed, List, Dict from ....tasks.api import RootTask, ComplexTask from ....tasks.tasks.database import DatabaseNode from ....utils.container_change import ContainerChange from ....utils.atom_util import tagged_members class NodeModel(Atom): """Object representing the database node state linked to a ComplexTask """ #: Reference to the task this node refers to. task = Typed(ComplexTask) #: Database entries available on the node associated with the task. entries = List() #: Database exceptions present on the node. exceptions = List() #: Database entries for which an access exception exists has_exceptions = List() #: Children nodes children = List() def sort_nodes(self, nodes=None): """Sort the nodes according to the task order. New nodes can be passed to the method, in which case they will replace the existing ones. """ nodes = nodes if nodes is not None else self.children tasks = [t for t in self.task.gather_children() if isinstance(t, ComplexTask)] self.children = sorted(nodes, key=lambda n: tasks.index(n.task)) def add_node(self, node): """Add a node to the children of this one. """ self.sort_nodes(self.children + [node]) def remove_node(self, node): """Remove a node from the children. We also discard all nodes that have no linked task (happens when discarding a ComplexTask containing other tasks). """ tasks = [t for t in self.task.gather_children() if isinstance(t, ComplexTask)] self.sort_nodes([c for c in self.children if c is not node and c.task in tasks]) def add_exception(self, entry): """Add an access exception. """ task, entry = self._find_task_from_entry(entry) if entry not in task.access_exs: task.add_access_exception(entry, 1) # ========================================================================= # --- Private API --------------------------------------------------------- # ========================================================================= def _post_setattr_task(self, old, new): """Attach and detach observers as needed. """ if new: for m in tagged_members(new, 'child'): new.observe(m, self._react_to_task_children_event) for m in tagged_members(new, 'child_notifier'): new.observe(m, self._react_to_task_children_event) if old: for m in tagged_members(old, 'child'): old.unobserve(m, self._react_to_task_children_event) for m in tagged_members(old, 'child_notifier'): old.unobserve(m, self._react_to_task_children_event) def _react_to_task_children_event(self, change): """Simply reorder the nodes if it was a move event. Only move events are transparent to the database. """ if isinstance(change, ContainerChange): if change.collapsed: for c in change.collapsed: self._react_to_task_children_event(c) if change.moved: self.sort_nodes() def _find_task_from_entry(self, full_entry): """Find the task and short name corresponding to a full entry name. 
""" possible_tasks = [t for t in self.task.gather_children() if full_entry.startswith(t.name)] if len(possible_tasks) > 1: for p in possible_tasks: e = full_entry[len(p.name)+1:] if e in p.database_entries: break task = p entry = e else: task = possible_tasks[0] entry = full_entry[len(task.name)+1:] return task, entry class EditorModel(Atom): """Model driving the database access editor. """ #: Reference to the root task of the currently edited task hierarchy. root = Typed(RootTask) #: Dictionary storing the nodes for all tasks by path. nodes = Dict() def increase_exc_level(self, path, entry): """Increase the exception level of an access exception. Parameters ---------- path : unicode Path of the node in which the exception to increase is. entry : unicode Entry whose access exception should be increased. """ self._modify_exception_level(path, entry, 1) def decrease_exc_level(self, path, entry): """Decrease the exception level of an access exception. Parameters ---------- path : unicode Path of the node in which the exception to increase is. entry : unicode Entry whose access exception should be increased. """ self._modify_exception_level(path, entry, -1) # ========================================================================= # --- Private API --------------------------------------------------------- # ========================================================================= def _modify_exception_level(self, path, entry, val): """Modify the exception level of an access exception. Parameters ---------- path : unicode Path of the node in which the exception to increase is. entry : unicode Entry whose access exception should be increased. val : int Amount by which to modify the level. """ database_node = self.root.database.go_to_path(path) real_path = path + '/' + database_node.meta['access'][entry] task, entry = self.nodes[real_path]._find_task_from_entry(entry) level = task.access_exs[entry] task.modify_access_exception(entry, level + val) def _post_setattr_root(self, old, new): """Ensure we are observing the right database. """ if old: old.database.unobserve('notifier', self._react_to_entries) old.database.unobserve('access_notifier', self._react_to_exceptions) old.database.unobserve('nodes_notifier', self._react_to_nodes) if new: new.database.observe('notifier', self._react_to_entries) new.database.observe('access_notifier', self._react_to_exceptions) new.database.observe('nodes_notifier', self._react_to_nodes) database_nodes = new.database.list_nodes() nodes = {p: self._model_from_node(p, n) for p, n in database_nodes.items()} # Write the has_exception member now that all nodes exists for p, n in nodes.items(): if n.exceptions: access = database_nodes[p].meta['access'] for k, v in access.items(): nodes[p + '/' + v].has_exceptions.append(k) # Set the proper parent for each node knowing there is only one # root node whose path is root, all other contain at least one / # and isolating the first part of the path e find its parent for p, m in nodes.items(): if '/' in p: p, _ = p.rsplit('/', 1) nodes[p].children.append(m) # Sort nodes now that they all have a parent for nmodel in nodes.values(): nmodel.sort_nodes() self.nodes = nodes def _react_to_entries(self, news): """Handle modification to entries. 
""" if isinstance(news, list): for n in news: self._react_to_entries(n) return path, entry = news[1].rsplit('/', 1) n = self.nodes[path] if news[0] == 'added': n.entries = n.entries[:] + [entry] elif news[0] == 'renamed': entries = n.entries[:] del entries[entries.index(entry)] entries.append(news[2].rsplit('/', 1)[1]) n.entries = entries elif news[0] == 'removed': entries = n.entries[:] del entries[entries.index(entry)] n.entries = entries def _react_to_exceptions(self, news): """Handle modifications to the access exceptions. """ if isinstance(news, list): for n in news: self._react_to_exceptions(n) return path = news[1] n = self.nodes[path] origin_node = self.nodes[path + '/' + news[2] if news[2] else path] if news[0] == 'added': n.exceptions = n.exceptions[:] + [news[3]] origin_node.has_exceptions = (origin_node.has_exceptions[:] + [news[3]]) elif news[0] == 'renamed': exceptions = n.exceptions[:] del exceptions[exceptions.index(news[3])] exceptions.append(news[4]) n.exceptions = exceptions exs = origin_node.has_exceptions[:] del exs[exs.index(news[3])] exs.append(news[4]) origin_node.has_exceptions = exs elif news[0] == 'removed': exceptions = n.exceptions[:] if news[3]: del exceptions[exceptions.index(news[3])] n.exceptions = exceptions exs = origin_node.has_exceptions[:] del exs[exs.index(news[3])] origin_node.has_exceptions = exs else: n.exceptions = [] origin_node.has_exceptions = [] def _react_to_nodes(self, news): """Handle modifications of the database nodes. """ if isinstance(news, list): for n in news: self._react_to_nodes(n) return path = news[1] + '/' + news[2] if news[0] == 'added': parent = self.nodes[news[1]] model = self._model_from_node(path, news[3]) parent.add_node(model) self.nodes[path] = model elif news[0] == 'renamed': new_path = news[1] + '/' + news[3] nodes = self.nodes.copy() for k, v in nodes.items(): if k.startswith(path): del self.nodes[k] self.nodes[new_path + k[len(path):]] = v elif news[0] == 'removed': node = self.nodes[path] del self.nodes[path] parent = self.nodes[path.rsplit('/', 1)[0]] parent.remove_node(node) def _get_task(self, path): """Retrieve the task corresponding to a certain path. """ if '/' not in path: return self.root names = path.split('/')[1:] task = self.root for n in names: for t in task.gather_children() + [None]: if t is None: raise ValueError('No task matching the specified path') if t.name == n: task = t break return task def _model_from_node(self, path, node): """Build a new model from a node informations. """ entries = [k for k, v in node.data.items() if not isinstance(v, DatabaseNode)] excs = list(node.meta.get('access', {}).keys()) return NodeModel(entries=entries, exceptions=excs, task=self._get_task(path))
bsd-3-clause
5,820,192,825,398,666,000
32.949438
79
0.518286
false
solin319/incubator-mxnet
tests/python/unittest/test_gluon_data.py
11
4615
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import os import tarfile import unittest import mxnet as mx import numpy as np from mxnet import gluon def test_array_dataset(): X = np.random.uniform(size=(10, 20)) Y = np.random.uniform(size=(10,)) dataset = gluon.data.ArrayDataset(X, Y) loader = gluon.data.DataLoader(dataset, 2) for i, (x, y) in enumerate(loader): assert mx.test_utils.almost_equal(x.asnumpy(), X[i*2:(i+1)*2]) assert mx.test_utils.almost_equal(y.asnumpy(), Y[i*2:(i+1)*2]) dataset = gluon.data.ArrayDataset(X) loader = gluon.data.DataLoader(dataset, 2) for i, x in enumerate(loader): assert mx.test_utils.almost_equal(x.asnumpy(), X[i*2:(i+1)*2]) def prepare_record(): if not os.path.isdir("data/test_images"): os.makedirs('data/test_images') if not os.path.isdir("data/test_images/test_images"): gluon.utils.download("http://data.mxnet.io/data/test_images.tar.gz", "data/test_images.tar.gz") tarfile.open('data/test_images.tar.gz').extractall('data/test_images/') if not os.path.exists('data/test.rec'): imgs = os.listdir('data/test_images/test_images') record = mx.recordio.MXIndexedRecordIO('data/test.idx', 'data/test.rec', 'w') for i, img in enumerate(imgs): str_img = open('data/test_images/test_images/'+img, 'rb').read() s = mx.recordio.pack((0, i, i, 0), str_img) record.write_idx(i, s) return 'data/test.rec' def test_recordimage_dataset(): recfile = prepare_record() dataset = gluon.data.vision.ImageRecordDataset(recfile) loader = gluon.data.DataLoader(dataset, 1) for i, (x, y) in enumerate(loader): assert x.shape[0] == 1 and x.shape[3] == 3 assert y.asscalar() == i def test_sampler(): seq_sampler = gluon.data.SequentialSampler(10) assert list(seq_sampler) == list(range(10)) rand_sampler = gluon.data.RandomSampler(10) assert sorted(list(rand_sampler)) == list(range(10)) seq_batch_keep = gluon.data.BatchSampler(seq_sampler, 3, 'keep') assert sum(list(seq_batch_keep), []) == list(range(10)) seq_batch_discard = gluon.data.BatchSampler(seq_sampler, 3, 'discard') assert sum(list(seq_batch_discard), []) == list(range(9)) rand_batch_keep = gluon.data.BatchSampler(rand_sampler, 3, 'keep') assert sorted(sum(list(rand_batch_keep), [])) == list(range(10)) def test_datasets(): assert len(gluon.data.vision.MNIST(root='data/mnist')) == 60000 assert len(gluon.data.vision.MNIST(root='data/mnist', train=False)) == 10000 assert len(gluon.data.vision.FashionMNIST(root='data/fashion-mnist')) == 60000 assert len(gluon.data.vision.FashionMNIST(root='data/fashion-mnist', train=False)) == 10000 assert len(gluon.data.vision.CIFAR10(root='data/cifar10')) == 50000 assert len(gluon.data.vision.CIFAR10(root='data/cifar10', train=False)) == 10000 assert len(gluon.data.vision.CIFAR100(root='data/cifar100')) == 50000 assert len(gluon.data.vision.CIFAR100(root='data/cifar100', fine_label=True)) == 
50000 assert len(gluon.data.vision.CIFAR100(root='data/cifar100', train=False)) == 10000 def test_image_folder_dataset(): prepare_record() dataset = gluon.data.vision.ImageFolderDataset('data/test_images') assert dataset.synsets == ['test_images'] assert len(dataset.items) == 16 class Dataset(gluon.data.Dataset): def __len__(self): return 100 def __getitem__(self, key): return mx.nd.full((10,), key) @unittest.skip("Somehow fails with MKL. Cannot reproduce locally") def test_multi_worker(): data = Dataset() loader = gluon.data.DataLoader(data, batch_size=1, num_workers=5) for i, batch in enumerate(loader): assert (batch.asnumpy() == i).all() if __name__ == '__main__': import nose nose.runmodule()
apache-2.0
6,379,889,225,420,548,000
40.205357
103
0.678657
false
jumpstarter-io/keystone
keystone/auth/plugins/password.py
6
1707
# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log from keystone import auth from keystone.auth import plugins as auth_plugins from keystone.common import dependency from keystone import exception from keystone.i18n import _ METHOD_NAME = 'password' LOG = log.getLogger(__name__) @dependency.requires('identity_api') class Password(auth.AuthMethodHandler): method = METHOD_NAME def authenticate(self, context, auth_payload, auth_context): """Try to authenticate against the identity backend.""" user_info = auth_plugins.UserAuthInfo.create(auth_payload, self.method) # FIXME(gyee): identity.authenticate() can use some refactoring since # all we care about is whether the password matches try: self.identity_api.authenticate( context, user_id=user_info.user_id, password=user_info.password) except AssertionError: # authentication failed because of invalid username or password msg = _('Invalid username or password') raise exception.Unauthorized(msg) auth_context['user_id'] = user_info.user_id
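# ---------------------------------------------------------------------------
# Editor's note (not part of the original Keystone file above): for context,
# the auth_payload handed to Password.authenticate() is the per-method portion
# of a v3 authentication request body (the value of the "password" key under
# "identity"). A hypothetical example of that structure; the id and password
# values below are invented:
EXAMPLE_PASSWORD_AUTH_PAYLOAD = {
    'user': {
        'id': 'c4da488862bd435c9e6c0275a0d0e49a',
        'password': 'secret',
        # a name plus domain pair may be supplied instead of an id, e.g.:
        # 'name': 'demo', 'domain': {'name': 'Default'},
    },
}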
apache-2.0
4,995,053,922,877,340,000
33.836735
79
0.704745
false
tedelhourani/ansible
lib/ansible/modules/cloud/misc/serverless.py
26
6837
#!/usr/bin/python # -*- coding: utf-8 -*- # (c) 2016, Ryan Scott Brown <[email protected]> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: serverless short_description: Manages a Serverless Framework project description: - Provides support for managing Serverless Framework (https://serverless.com/) project deployments and stacks. version_added: "2.3" options: state: choices: ['present', 'absent'] description: - Goal state of the given stage/project required: false default: present serverless_bin_path: description: - The path of a serverless framework binary relative to the 'service_path', e.g. node_modules/.bin/serverless required: false version_added: "2.4" service_path: description: - The path to the root of the Serverless Service to be operated on. required: true stage: description: - The name of the serverless framework project stage to deploy to. This uses the serverless framework default "dev". required: false functions: description: - A list of specific functions to deploy. If this is not provided, all functions in the service will be deployed. required: false default: [] region: description: - AWS region to deploy the service to required: false default: us-east-1 deploy: description: - Whether or not to deploy artifacts after building them. When this option is `false` all the functions will be built, but no stack update will be run to send them out. This is mostly useful for generating artifacts to be stored/deployed elsewhere. required: false default: true notes: - Currently, the `serverless` command must be in the path of the node executing the task. In the future this may be a flag. requirements: [ "serverless", "yaml" ] author: "Ryan Scott Brown @ryansb" ''' EXAMPLES = """ # Basic deploy of a service - serverless: service_path: '{{ project_dir }}' state: present # Deploy specific functions - serverless: service_path: '{{ project_dir }}' functions: - my_func_one - my_func_two # deploy a project, then pull its resource list back into Ansible - serverless: stage: dev region: us-east-1 service_path: '{{ project_dir }}' register: sls # The cloudformation stack is always named the same as the full service, so the # cloudformation_facts module can get a full list of the stack resources, as # well as stack events and outputs - cloudformation_facts: region: us-east-1 stack_name: '{{ sls.service_name }}' stack_resources: true # Deploy a project but use a locally installed serverless binary instead of the global serverless binary - serverless: stage: dev region: us-east-1 service_path: '{{ project_dir }}' serverless_bin_path: node_modules/.bin/serverless """ RETURN = """ service_name: type: string description: The service name read from serverless.yml, combined with the stage that was targeted (see the sample). returned: always sample: my-fancy-service-dev state: type: string description: Whether the stack for the serverless project is present/absent. returned: always command: type: string description: Full `serverless` command run by this module, in case you want to re-run the command outside the module. 
returned: always sample: serverless deploy --stage production """ import os import traceback try: import yaml HAS_YAML = True except ImportError: HAS_YAML = False from ansible.module_utils.basic import AnsibleModule def read_serverless_config(module): path = module.params.get('service_path') try: with open(os.path.join(path, 'serverless.yml')) as sls_config: config = yaml.safe_load(sls_config.read()) return config except IOError as e: module.fail_json(msg="Could not open serverless.yml in {}. err: {}".format(path, str(e)), exception=traceback.format_exc()) module.fail_json(msg="Failed to open serverless config at {}".format( os.path.join(path, 'serverless.yml'))) def get_service_name(module, stage): config = read_serverless_config(module) if config.get('service') is None: module.fail_json(msg="Could not read `service` key from serverless.yml file") if stage: return "{}-{}".format(config['service'], stage) return "{}-{}".format(config['service'], config.get('stage', 'dev')) def main(): module = AnsibleModule( argument_spec=dict( service_path = dict(required=True, type='path'), state = dict(default='present', choices=['present', 'absent'], required=False), functions = dict(type='list', required=False), region = dict(default='', required=False), stage = dict(default='', required=False), deploy = dict(default=True, type='bool', required=False), serverless_bin_path = dict(required=False, type='path') ), ) if not HAS_YAML: module.fail_json(msg='yaml is required for this module') service_path = module.params.get('service_path') state = module.params.get('state') functions = module.params.get('functions') region = module.params.get('region') stage = module.params.get('stage') deploy = module.params.get('deploy', True) serverless_bin_path = module.params.get('serverless_bin_path') if serverless_bin_path is not None: command = serverless_bin_path + " " else: command = "serverless " if state == 'present': command += 'deploy ' elif state == 'absent': command += 'remove ' else: module.fail_json(msg="State must either be 'present' or 'absent'. Received: {}".format(state)) if not deploy and state == 'present': command += '--noDeploy ' if region: command += '--region {} '.format(region) if stage: command += '--stage {} '.format(stage) rc, out, err = module.run_command(command, cwd=service_path) if rc != 0: if state == 'absent' and "-{}' does not exist".format(stage) in out: module.exit_json(changed=False, state='absent', command=command, out=out, service_name=get_service_name(module, stage)) module.fail_json(msg="Failure when executing Serverless command. Exited {}.\nstdout: {}\nstderr: {}".format(rc, out, err)) # gather some facts about the deployment module.exit_json(changed=True, state='present', out=out, command=command, service_name=get_service_name(module, stage)) if __name__ == '__main__': main()
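# ---------------------------------------------------------------------------
# Editor's note (not part of the original Ansible module above): a condensed,
# hypothetical sketch of the command assembly performed inline in main(),
# pulled out as a standalone function so the option-to-flag mapping is easier
# to see. The function name is invented; the real module builds the string
# inline and then passes it to module.run_command().
def build_serverless_command(state='present', stage='', region='',
                             deploy=True, serverless_bin_path=None):
    command = (serverless_bin_path + " ") if serverless_bin_path else "serverless "
    command += 'deploy ' if state == 'present' else 'remove '
    if not deploy and state == 'present':
        command += '--noDeploy '
    if region:
        command += '--region {} '.format(region)
    if stage:
        command += '--stage {} '.format(stage)
    return command

# For example:
#   build_serverless_command(stage='dev', region='us-east-1')
#   -> 'serverless deploy --region us-east-1 --stage dev '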
gpl-3.0
-3,198,238,929,038,244,000
31.402844
152
0.655404
false
seims/SEIMS
preprocess/soil_param.py
2
27856
#! /usr/bin/env python # coding=utf-8 # # @Author: Junzhi Liu, 2013-1-10 # @Revised: Liang-Jun Zhu, Huiran Gao, Fang Shen # @Revised date: 2016-7-22 # @Note: 1. Names and units of soil physical parameter are referred to readsol.f, soil_par.f, and soil_phys.f in SWAT # 2. Data validation checking is also conducted here. # 3. Basic protocols: a. all names are Capitalized. b. output Geotiff # names may be appended in text.py from soil_texture import * from text import * from util import * # SEQN |None : Unique identifier of soil # SNAME |None : soil name # SOILLAYERS |None : (nly) number of soil layers # SOILDEPTH |mm : (sol_z) depth from the surface to bottom of soil layer # SOILTHICK |mm : soil thickness for calculation convenient # OM |% : organic matter content (weight percent) # SOL_CBN |% : (sol_cbn) percent organic carbon in soil layer # SOL_N |% : (sol_n) used when using CSWAT = 1, i.e, C-FARM one carbon pool model # CLAY |% : (sol_clay) percent clay content in soil material,diameter < 0.002 mm # SILT |% : (sol_silt) percent silt content in soil material,diameter between 0.002 mm and 0.05 mm # SAND |% : (sol_sand) percent sand content in soil material,diameter between 0.05 mm and 2 mm # ROCK |% : (sol_rock) percent of rock fragments content in soil material,diameter > 2 mm # SOIL_TEXTURE |None : soil texture # HYDRO_GROUP |None : Hydrological group, 1,2,3,and 4 to represent A,B,C,and D # SOL_ZMX |mm : (sol_zmx) maximum rooting depth of soil profile # ANION_EXCL |None : (anion_excl) fraction of porosity from which anions are excluded,default is 0.5 # SOL_CRK |None : (sol_crk) crack volume potential of soil expressed as a fraction of the total soil volume # DENSITY |Mg/m3 or g/cm3: (sol_bd) bulk density of each soil layer # SOL_AVBD |Mg/m3 or g/cm3: (sol_avbd) average bulk density for soil profile # CONDUCTIVITY |mm/hr : (sol_k) saturated hydraulic conductivity # SOL_HK |None : (sol_hk) beta coefficent to calculate hydraulic conductivity # WILTINGPOINT |mm H2O / mm soil: (sol_wp) water content of soil at -1.5 MPa (wilting point) # SOL_WPMM |mm H2O: (sol_wpmm) water content of soil at -1.5 MPa (wilting point) # SOL_SUMWP |mm H2O: (sol_sumwp) amount of water held in the soil profile at wilting point # FIELDCAP |mm H2O / mm soil : (sol_up) water content of soil at -0.033 MPa (field capacity) # AWC |mm H2O / mm soil : (sol_awc) available water capacity of soil layer # SOL_AWC |mm H2O: (sol_fc) amount of water available to plants in soil layer at field capacity (fc - wp) # SOL_SUMAWC |mm H2O: (sol_sumfc) amount of water held in soil profile at field capacity # POROSITY |None : (sol_por) total porosity of soil layer expressed as a fraction of the total volume # POREINDEX |None : pore size distribution index # SOL_AVPOR |None : (sol_avpor) average porosity for entire soil profile # SOL_UL |mm H2O: (sol_ul) amount of water held in the soil layer at saturation (sat - wp water) # SOL_SUMUL |mm H2O: (sol_sumul) amount of water held in soil profile at saturation # USLE_K |None : USLE K factor # SOL_ALB |None : albedo of top soil surface # WFSH |mm : wetting front matric potential (usde in Green-Ampt method) # ESCO |None : soil evaporation compensation factor # VWT |None : (vwt) variable water table factor, used in percolation modules # DET_SAND |None : (det_san) detached sediment size distribution, sand fraction # DET_SILT |None : (det_sil) detached sediment size distribution, silt fraction # DET_CLAY |None : (det_cla) detached sediment size distribution, clay fraction # DET_SMAGG |None : (det_sag) 
detached sediment size distribution, small aggregation fraction # DET_LGAGG |None : (det_lag) detached sediment size distribution, large aggregation fraction # CRDEP |mm : (crdep) maximum or potential crack volume # VOLCR |mm : (volcr) crack volume for soil layer, should # be calculated in SEIMS, using moist_ini # SOL_NO3 |kg/ha : (sol_no3) concentration of nitrate in soil layers # SOL_NH4 |kg/ha : (sol_nh4) concentration of ammonium-N in soil layers # SOL_ORGN |kg/ha : (sol_orgn) organic N concentration in soil layers # SOL_ORGP |kg/ha : (sol_orgp) organic P concentration in soil layers # SOL_SOLP |kg/ha : (sol_solp) soluble P concentration in soil layers class SoilProperty: ''' base class of Soil physical and general chemical properties :method: init(SEQN, SNAM) ''' def __init__(self, SEQN, SNAM): self.SEQN = SEQN self.SNAM = SNAM self.SOILLAYERS = DEFAULT_NODATA self.SOILDEPTH = [] self.SOILTHICK = [] self.OM = [] self.SOL_CBN = [] self.SOL_N = [] self.CLAY = [] self.SILT = [] self.SAND = [] self.ROCK = [] self.SOIL_TEXTURE = DEFAULT_NODATA self.HYDRO_GROUP = DEFAULT_NODATA self.SOL_ZMX = DEFAULT_NODATA self.ANION_EXCL = DEFAULT_NODATA self.SOL_CRK = DEFAULT_NODATA self.DENSITY = [] self.SOL_AVBD = [] self.CONDUCTIVITY = [] self.SOL_HK = [] self.WILTINGPOINT = [] self.SOL_WPMM = [] self.SOL_SUMWP = 0. self.FIELDCAP = [] self.AWC = [] self.SOL_AWC = [] self.SOL_SUMAWC = 0. self.POROSITY = [] self.POREINDEX = [] self.SOL_AVPOR = DEFAULT_NODATA self.SOL_UL = [] self.SOL_SUMUL = 0. self.USLE_K = [] self.SOL_ALB = DEFAULT_NODATA self.WFSH = DEFAULT_NODATA self.VWT = [] self.DET_SAND = DEFAULT_NODATA self.DET_SILT = DEFAULT_NODATA self.DET_CLAY = DEFAULT_NODATA self.DET_SMAGG = DEFAULT_NODATA self.DET_LGAGG = DEFAULT_NODATA self.CRDEP = [] self.ESCO = DEFAULT_NODATA # Here after are general soil chemical properties self.SOL_NO3 = [] self.SOL_NH4 = [] self.SOL_ORGN = [] self.SOL_ORGP = [] self.SOL_SOLP = [] def SoilDict(self): solDict = self.__dict__ solDict.pop(SOL_NAME) # remove the empty element for ele in solDict.keys(): if solDict[ele] == []: solDict.pop(ele) # print solDict return solDict def CheckData(self): # check the required input, and calculate all physical and general chemical properties # set a soil layer at dep_new and adjust all lower layers # a septic layer:0-10mm,accordig to swat layersplit.f if self.SOILLAYERS == DEFAULT_NODATA: raise ValueError( "Soil layers number must be provided, please check the input file!") dep_new = 10. 
if self.SOILDEPTH[0] - dep_new >= 10.: self.SOILLAYERS += 1 self.SOILDEPTH.insert(0, dep_new) if self.OM != []: self.OM.insert(0, self.OM[0]) else: raise ValueError("Organic matter must be provided!") if self.CLAY != []: self.CLAY.insert(0, self.CLAY[0]) else: raise ValueError("Clay content must be provided!") if self.SILT != []: self.SILT.insert(0, self.SILT[0]) else: raise ValueError("Silt content must be provided!") if self.SAND != []: self.SAND.insert(0, self.SAND[0]) else: raise ValueError("Sand content must be provided!") if self.ROCK != []: self.ROCK.insert(0, self.ROCK[0]) else: raise ValueError("Rock content must be provided!") if self.FIELDCAP != []: self.FIELDCAP.insert(0, self.FIELDCAP[0]) else: raise ValueError("Available water capacity must be provided!") if self.DENSITY != []: self.DENSITY.insert(0, self.DENSITY[0]) else: raise ValueError("Bulk density must be provided!") if self.CONDUCTIVITY != []: self.CONDUCTIVITY.insert(0, self.CONDUCTIVITY[0]) if self.WILTINGPOINT != []: self.WILTINGPOINT.insert(0, self.WILTINGPOINT[0]) if self.AWC != []: self.AWC.insert(0, self.AWC[0]) for i in range(self.SOILLAYERS): if self.AWC[i] <= 0.: self.AWC[i] = 0.005 elif self.AWC[i] <= 0.01: self.AWC[i] = 0.01 elif self.AWC[i] >= 0.8: self.AWC[i] = 0.8 if self.POROSITY != []: self.POROSITY.insert(0, self.POROSITY[0]) if self.USLE_K != []: self.USLE_K.insert(0, self.USLE_K[0]) if self.SOL_NO3 != []: self.SOL_NO3.insert(0, self.SOL_NO3[0]) else: self.SOL_NO3 = list(numpy.zeros(self.SOILLAYERS)) if self.SOL_NH4 != []: self.SOL_NH4.insert(0, self.SOL_NH4[0]) else: self.SOL_NH4 = list(numpy.zeros(self.SOILLAYERS)) if self.SOL_ORGN != []: self.SOL_ORGN.insert(0, self.SOL_ORGN[0]) else: self.SOL_ORGN = list(numpy.zeros(self.SOILLAYERS)) if self.SOL_SOLP != []: self.SOL_SOLP.insert(0, self.SOL_SOLP[0]) else: self.SOL_SOLP = list(numpy.zeros(self.SOILLAYERS)) if self.SOL_ORGP != []: self.SOL_ORGP.insert(0, self.SOL_ORGP[0]) else: self.SOL_ORGP = list(numpy.zeros(self.SOILLAYERS)) if self.SOILDEPTH == [] or len(self.SOILDEPTH) != self.SOILLAYERS or DEFAULT_NODATA in self.SOILDEPTH: raise IndexError( "Soil depth must have a size equal to NLAYERS and should not include NODATA (-9999)!") # Calculate soil thickness of each layer for l in range(self.SOILLAYERS): if l == 0: self.SOILTHICK.append(self.SOILDEPTH[l]) else: self.SOILTHICK.append( self.SOILDEPTH[l] - self.SOILDEPTH[l - 1]) if self.SOL_ZMX == DEFAULT_NODATA or self.SOL_ZMX > self.SOILDEPTH[-1]: self.SOL_ZMX = self.SOILDEPTH[-1] if self.ANION_EXCL == DEFAULT_NODATA: self.ANION_EXCL = 0.5 if self.OM == [] or len(self.OM) != self.SOILLAYERS: raise IndexError( "Soil organic matter must have a size equal to NLAYERS!") elif DEFAULT_NODATA in self.OM and self.OM.index(DEFAULT_NODATA) >= 2 and self.SOILLAYERS >= 3: for i in range(2, self.SOILLAYERS): if self.OM[i] == DEFAULT_NODATA: self.OM[i] = self.OM[i - 1] * \ numpy.exp(-self.SOILTHICK[i]) # mm # Calculate sol_cbn = sol_om * 0.58 if self.SOL_CBN == [] or len(self.SOL_CBN) != self.SOILLAYERS: self.SOL_CBN = [] for i in range(self.SOILLAYERS): if self.OM[i] * 0.58 < UTIL_ZERO: self.SOL_CBN.append(0.1) else: self.SOL_CBN.append(self.OM[i] * 0.58) # Calculate sol_n = sol_cbn/11. if self.SOL_N == [] or len(self.SOL_N) != self.SOILLAYERS: self.SOL_N = [] for i in range(self.SOILLAYERS): self.SOL_N.append(self.SOL_CBN[i] / 11.) 
if self.CLAY == [] or len(self.CLAY) != self.SOILLAYERS or DEFAULT_NODATA in self.CLAY: raise IndexError( "Clay content must have a size equal to NLAYERS and should not include NODATA (-9999)!") if self.SILT == [] or len(self.SILT) != self.SOILLAYERS or DEFAULT_NODATA in self.SILT: raise IndexError( "Silt content must have a size equal to NLAYERS and should not include NODATA (-9999)!") if self.SAND == [] or len(self.SAND) != self.SOILLAYERS or DEFAULT_NODATA in self.SAND: raise IndexError( "Sand content must have a size equal to NLAYERS and should not include NODATA (-9999)!") if self.ROCK == [] or len(self.ROCK) != self.SOILLAYERS or DEFAULT_NODATA in self.ROCK: raise IndexError( "Rock content must have a size equal to NLAYERS and should not include NODATA (-9999)!") # temperory variables tmp_fc = [] tmp_sat = [] tmp_wp = [] for i in range(self.SOILLAYERS): s = self.SAND[i] * 0.01 # % -> decimal c = self.CLAY[i] * 0.01 om = self.OM[i] wpt = -0.024 * s + 0.487 * c + 0.006 * om + 0.005 * \ s * om - 0.013 * c * om + 0.068 * s * c + 0.031 tmp_wp.append(1.14 * wpt - 0.02) fct = -0.251 * s + 0.195 * c + 0.011 * om + 0.006 * \ s * om - 0.027 * c * om + 0.452 * s * c + 0.299 fc = fct + 1.283 * fct * fct - 0.374 * fct - 0.015 s33t = 0.278 * s + 0.034 * c + 0.022 * om - 0.018 * \ s * om - 0.027 * c * om - 0.584 * s * c + 0.078 s33 = 1.636 * s33t - 0.107 sat = fc + s33 - 0.097 * s + 0.043 tmp_fc.append(fc) tmp_sat.append(sat) if self.WILTINGPOINT != [] and len(self.WILTINGPOINT) != self.SOILLAYERS: raise IndexError( "Wilting point must have a size equal to soil layers number!") elif self.WILTINGPOINT == []: self.WILTINGPOINT = tmp_wp[:] elif DEFAULT_NODATA in self.WILTINGPOINT: for i in range(self.SOILLAYERS): if self.WILTINGPOINT[i] == DEFAULT_NODATA: self.WILTINGPOINT[i] = tmp_wp[i] if self.DENSITY != [] and len(self.DENSITY) != self.SOILLAYERS: raise IndexError( "Bulk density must have a size equal to soil layers number!") elif self.DENSITY == [] or DEFAULT_NODATA in self.DENSITY: tmp_bd = [] for i in range(self.SOILLAYERS): sat = tmp_sat[i] fc = tmp_fc[i] if self.FIELDCAP != [] and len(self.FIELDCAP) == self.SOILLAYERS: sat = sat - fc + self.FIELDCAP[i] tmp_bd.append(2.65 * (1.0 - sat)) if DEFAULT_NODATA in self.DENSITY: for i in range(self.SOILLAYERS): if self.DENSITY[i] == DEFAULT_NODATA: self.DENSITY[i] = tmp_bd[i] elif self.DENSITY == []: self.DENSITY = tmp_bd[:] if self.FIELDCAP != [] and len(self.FIELDCAP) != self.SOILLAYERS: raise IndexError( "Field capacity must have a size equal to soil layers number!") elif self.FIELDCAP == [] or DEFAULT_NODATA in self.FIELDCAP: tmp_fc_bdeffect = [] for i in range(self.SOILLAYERS): fc = tmp_fc[i] sat = tmp_sat[i] if self.DENSITY != [] and len(self.DENSITY) == self.SOILLAYERS: p_df = self.DENSITY[i] else: p_df = 2.65 * (1.0 - sat) sat_df = 1. 
- p_df / 2.65 tmp_fc_bdeffect.append(fc - 0.2 * (sat - sat_df)) if DEFAULT_NODATA in self.FIELDCAP: for i in range(self.SOILLAYERS): if self.FIELDCAP[i] == DEFAULT_NODATA: self.FIELDCAP[i] = tmp_fc_bdeffect[i] elif self.FIELDCAP == []: self.FIELDCAP = tmp_fc_bdeffect[:] if self.AWC != [] and len(self.AWC) != self.SOILLAYERS: raise IndexError( "Available water capacity must have the size equal to soil layers number!") elif self.AWC == []: for i in range(self.SOILLAYERS): self.AWC.append(self.FIELDCAP[i] - self.WILTINGPOINT[i]) elif DEFAULT_NODATA in self.AWC: for i in range(self.SOILLAYERS): if self.AWC[i] == DEFAULT_NODATA: self.AWC[i] = self.FIELDCAP[i] - self.WILTINGPOINT[i] if self.POREINDEX != [] and len(self.POREINDEX) != self.SOILLAYERS: raise IndexError( "Pore disconnectedness index must have a size equal to soil layers number!") elif self.POREINDEX == []: for i in range(self.SOILLAYERS): # An fitted equation proposed by Cosby et al. (1984) is # adopted. By LJ, 2016-9-22 b = 0.159 * self.CLAY[i] + 2.91 self.POREINDEX.append(b) # previous version, currently deprecated by LJ # fc = self.FIELDCAP[i] # wp = self.WILTINGPOINT[i] # b = (math.log(1500.) - math.log(33.)) / (math.log(fc) - math.log(wp)) # self.POREINDEX.append(1.0 / b) if self.POROSITY != [] and len(self.POROSITY) != self.SOILLAYERS: raise IndexError( "Soil Porosity must have a size equal to soil layers number!") elif self.POROSITY == []: for i in range(self.SOILLAYERS): # from the theroy of swat self.POROSITY.append(1. - self.DENSITY[i] / 2.65) elif DEFAULT_NODATA in self.POROSITY: for i in range(self.SOILLAYERS): if self.POROSITY[i] == DEFAULT_NODATA: self.POROSITY[i] = 1. - self.DENSITY[i] / 2.65 if self.CONDUCTIVITY != [] and len(self.CONDUCTIVITY) != self.SOILLAYERS: raise IndexError( "Saturated conductivity must have a size equal to soil layers number!") elif self.CONDUCTIVITY == [] or DEFAULT_NODATA in self.CONDUCTIVITY: tmp_k = [] for i in range(self.SOILLAYERS): lamda = self.POREINDEX[i] fc = tmp_fc[i] sat = tmp_sat[i] tmp_k.append(1930. * pow(sat - fc, 3. - lamda)) if self.CONDUCTIVITY == []: self.CONDUCTIVITY = tmp_k[:] elif DEFAULT_NODATA in self.CONDUCTIVITY: for i in range(self.SOILLAYERS): if self.CONDUCTIVITY[i] == DEFAULT_NODATA: self.CONDUCTIVITY[i] = tmp_k[i] # calculate water content of soil at -1.5 MPa and -0.033 MPa # (soil_phys.f) if self.WILTINGPOINT == []: for i in range(self.SOILLAYERS): tmpwp = 0.4 * self.CLAY[i] * self.DENSITY[i] / 100. if tmpwp <= 0.: tmpwp = 0.005 self.WILTINGPOINT.append(tmpwp) # calculate field capcity (sol_up) if self.FIELDCAP == []: for i in range(self.SOILLAYERS): self.FIELDCAP.append(self.WILTINGPOINT[i] + self.AWC[i]) # calculate porosity if self.POROSITY == []: for i in range(self.SOILLAYERS): self.POROSITY.append(1. - self.DENSITY[i] / 2.65) if self.SOL_CRK == DEFAULT_NODATA: self.SOL_CRK = numpy.mean(self.POROSITY) for i in range(self.SOILLAYERS): if self.FIELDCAP[i] >= self.POROSITY[i]: self.FIELDCAP[i] = self.POROSITY[i] - 0.05 self.WILTINGPOINT[i] = self.FIELDCAP[i] - self.AWC[i] if self.WILTINGPOINT[i] <= 0.: self.FIELDCAP[i] = self.POROSITY[i] * 0.75 self.WILTINGPOINT[i] = self.POROSITY[i] * 0.25 # compute drainable porosity and variable water table factor drpor = self.POROSITY[i] - self.FIELDCAP[i] self.VWT.append((437.13 * drpor * drpor) - (95.08 * drpor) + 8.257) sa = self.SAND[0] / 100. cl = self.CLAY[0] / 100. si = self.SILT[0] / 100. # determine detached sediment size distribution typical for mid-western soils in USA (Foster et al., 1980) # Based on SWRRB. 
self.DET_SAND = 2.49 * sa * (1. - cl) self.DET_SILT = 0.13 * si self.DET_CLAY = 0.2 * cl if cl < 0.25: self.DET_SMAGG = 2.0 * cl elif cl > 0.5: self.DET_SMAGG = 0.57 else: self.DET_SMAGG = 0.28 * (cl - 0.25) + 0.5 self.DET_LGAGG = 1. - self.DET_SAND - \ self.DET_SILT - self.DET_CLAY - self.DET_SMAGG # Error check, may happen for soils with more sand. The fraction wont # add upto 1.0 if self.DET_LGAGG < 0.: self.DET_SAND /= (1. - self.DET_LGAGG) self.DET_SILT /= (1. - self.DET_LGAGG) self.DET_CLAY /= (1. - self.DET_LGAGG) self.DET_SMAGG /= (1. - self.DET_LGAGG) self.DET_LGAGG = 0. # initialize water/drainage coefs for each soil layer sumpor = 0. self.SOL_SUMUL = 0. self.SOL_SUMAWC = 0. self.SOL_SUMWP = 0. for i in range(self.SOILLAYERS): pormm = self.POROSITY[i] * self.SOILTHICK[i] sumpor += pormm self.SOL_UL.append( (self.POROSITY[i] - self.WILTINGPOINT[i]) * self.SOILTHICK[i]) self.SOL_SUMUL += self.SOL_UL[i] self.SOL_AWC.append( (self.FIELDCAP[i] - self.WILTINGPOINT[i]) * self.SOILTHICK[i]) self.SOL_SUMAWC += self.SOL_AWC[i] self.SOL_HK.append( (self.SOL_UL[i] - self.SOL_AWC[i]) / self.CONDUCTIVITY[i]) if self.SOL_HK[i] < 1.: self.SOL_HK[i] = 1. self.SOL_WPMM.append(self.WILTINGPOINT[i] * self.SOILTHICK[i]) self.SOL_SUMWP += self.SOL_WPMM[i] self.CRDEP.append( self.SOL_CRK * 0.916 * math.exp(-0.0012 * self.SOILDEPTH[i]) * self.SOILTHICK[i]) self.SOL_AVPOR = sumpor / self.SOILDEPTH[i] self.SOL_AVBD = 2.65 * (1. - self.SOL_AVPOR) # calculate infiltration parameters for subdaily time step self.WFSH = 10. * math.exp(6.5309 - 7.32561 * self.POROSITY[0] + 3.809479 * math.pow(self.POROSITY[0], 2) + 0.001583 * math.pow(self.CLAY[0], 2) + 0.000344 * self.SAND[0] * self.CLAY[ 0] - 0.049837 * self.POROSITY[0] * self.SAND[0] + 0.001608 * math.pow(self.POROSITY[0], 2) * math.pow(self.SAND[0], 2) + 0.001602 * math.pow(self.POROSITY[0], 2) * math.pow( self.CLAY[0], 2) - 0.0000136 * math.pow(self.SAND[0], 2) * self.CLAY[0] - 0.003479 * math.pow( self.CLAY[0], 2) * self.POROSITY[0] - 0.000799 * math.pow(self.SAND[0], 2) * self.POROSITY[0]) if self.SOL_ALB == DEFAULT_NODATA: self.SOL_ALB = 0.2227 * math.exp(-1.8672 * self.SOL_CBN[0]) if self.ESCO == DEFAULT_NODATA: self.ESCO = 0.95 if self.USLE_K != [] and len(self.USLE_K) != self.SOILLAYERS: raise IndexError( "USLE K factor must have a size equal to NLAYERS!") elif self.USLE_K == [] or DEFAULT_NODATA in self.USLE_K: tmp_usle_k = [] for i in range(self.SOILLAYERS): # According to Liu BY et al., (1999) sand = self.SAND[i] silt = self.SILT[i] clay = self.CLAY[i] cbn = self.OM[i] * 0.58 sn = 1. - sand * 0.01 a = (0.2 + 0.3 * math.exp(-0.0256 * sand * (1. - silt * 0.01))) b = math.pow(silt / (clay + silt), 0.3) c = (1. - 0.25 * cbn / (cbn + math.exp(3.72 - 2.95 * cbn))) d = (1. - 0.25 * sn / (sn + math.exp(-5.51 + 22.9 * sn))) k = a * b * c * d tmp_usle_k.append(k) if self.USLE_K == []: self.USLE_K = tmp_usle_k[:] elif DEFAULT_NODATA in self.USLE_K: for i in range(self.SOILLAYERS): if self.USLE_K[i] == DEFAULT_NODATA: self.USLE_K[i] = tmp_usle_k[i] if self.SOIL_TEXTURE == DEFAULT_NODATA or self.HYDRO_GROUP == DEFAULT_NODATA: st, hg, uslek = GetTexture( self.CLAY[0], self.SILT[0], self.SAND[0]) self.SOIL_TEXTURE = st self.HYDRO_GROUP = hg # Unit conversion for general soil chemical properties wt1 = [] for j in range(self.SOILLAYERS): # g/kg => kg/ha wt1.append(self.DENSITY[j] * self.SOILTHICK[j] * 10.) 
if self.SOL_NO3 != [] and len(self.SOL_NO3) == self.SOILLAYERS: for j in range(self.SOILLAYERS): self.SOL_NO3[j] = self.SOL_NO3[j] * wt1[j] if self.SOL_NH4 != [] and len(self.SOL_NH4) == self.SOILLAYERS: for j in range(self.SOILLAYERS): self.SOL_NH4[j] = self.SOL_NH4[j] * wt1[j] if self.SOL_ORGN != [] and len(self.SOL_ORGN) == self.SOILLAYERS: for j in range(self.SOILLAYERS): self.SOL_ORGN[j] = self.SOL_ORGN[j] * wt1[j] if self.SOL_SOLP != [] and len(self.SOL_SOLP) == self.SOILLAYERS: for j in range(self.SOILLAYERS): self.SOL_SOLP[j] = self.SOL_SOLP[j] * wt1[j] if self.SOL_ORGP != [] and len(self.SOL_ORGP) == self.SOILLAYERS: for j in range(self.SOILLAYERS): self.SOL_ORGP[j] = self.SOL_ORGP[j] * wt1[j] # Calculate soil properties from sand, clay and organic matter. # TODO, add reference. def GetProperties(s, c, om): # wilting point (SOL_WP) wpt = -0.024 * s + 0.487 * c + 0.006 * om + 0.005 * \ s * om - 0.013 * c * om + 0.068 * s * c + 0.031 wp = 1.14 * wpt - 0.02 # bulk density according to field capacity fct = -0.251 * s + 0.195 * c + 0.011 * om + 0.006 * \ s * om - 0.027 * c * om + 0.452 * s * c + 0.299 fc = fct + 1.283 * fct * fct - 0.374 * fct - 0.015 s33t = 0.278 * s + 0.034 * c + 0.022 * om - 0.018 * \ s * om - 0.027 * c * om - 0.584 * s * c + 0.078 s33 = 1.636 * s33t - 0.107 sat = fc + s33 - 0.097 * s + 0.043 pn = 2.65 * (1.0 - sat) # field capacity (SOL_FC) with density effects (df) p_df = pn sat_df = 1. - p_df / 2.65 # porosity fc_df = fc - 0.2 * (sat - sat_df) # available water capacity (SOL_AWC) awc = fc_df - wp # pore disconnectedness index b = (math.log(1500.) - math.log(33.)) / (math.log(fc) - math.log(wp)) lamda = 1.0 / b # saturated conductivity # print s, c, sat, fc, 3-lamda ks = 1930 * pow(sat - fc, 3. - lamda) # print wp, fc_df, awc, return wp, fc_df, sat_df, p_df, ks, lamda # "WILTINGPOINT", "FIELDCAP", "Porosity","DENSITY","CONDUCTIVITY", "POREINDEX" def GetValue(geoMask, geoMap, data, i, j): # pGeo = Proj("+proj=longlat +ellps=krass +no_defs") # pAlbers = Proj("+proj=aea +ellps=krass +lon_0=105 +lat_0=0 +lat_1=25 +lat_2=47") # xMask = geoMask[0] + (j+0.5)*geoMask[1] # yMask = geoMask[3] + (i+0.5)*geoMask[5] # xMap, yMap = transform(pAlbers, pGeo, xMask, yMask) xMap = geoMask[0] + (j + 0.5) * geoMask[1] yMap = geoMask[3] + (i + 0.5) * geoMask[5] jMap = (xMap - geoMap[0]) / geoMap[1] iMap = (yMap - geoMap[3]) / geoMap[5] return data[iMap][jMap]
gpl-2.0
-2,608,813,226,555,334,000
47.445217
118
0.53213
false
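# ---------------------------------------------------------------------------
# Illustrative sketch for the SEIMS soil-property script above (the record
# ending just before this note). It re-implements the pedotransfer
# regressions used by GetProperties() (functions of sand, clay and organic
# matter) so they can be run stand-alone; the function name and the sample
# loam composition below are hypothetical and only for illustration.
import math


def pedotransfer_demo(sand_pct, clay_pct, om_pct):
    """Return (wilting point, field capacity, saturation, bulk density,
    saturated conductivity in mm/hr) using the same regressions as
    GetProperties() in the module above."""
    s = sand_pct * 0.01   # percent -> decimal, as done in CheckData()
    c = clay_pct * 0.01
    om = om_pct
    wpt = (-0.024 * s + 0.487 * c + 0.006 * om + 0.005 * s * om
           - 0.013 * c * om + 0.068 * s * c + 0.031)
    wp = 1.14 * wpt - 0.02                       # wilting point
    fct = (-0.251 * s + 0.195 * c + 0.011 * om + 0.006 * s * om
           - 0.027 * c * om + 0.452 * s * c + 0.299)
    fc = fct + 1.283 * fct * fct - 0.374 * fct - 0.015   # field capacity
    s33t = (0.278 * s + 0.034 * c + 0.022 * om - 0.018 * s * om
            - 0.027 * c * om - 0.584 * s * c + 0.078)
    s33 = 1.636 * s33t - 0.107
    sat = fc + s33 - 0.097 * s + 0.043           # saturation water content
    bd = 2.65 * (1.0 - sat)                      # bulk density, g/cm3
    b = (math.log(1500.) - math.log(33.)) / (math.log(fc) - math.log(wp))
    lamda = 1.0 / b                              # pore size distribution index
    ks = 1930. * pow(sat - fc, 3. - lamda)       # saturated conductivity
    return wp, fc, sat, bd, ks


if __name__ == '__main__':
    # 40% sand, 20% clay, 2.5% organic matter: a hypothetical loam
    print(pedotransfer_demo(40., 20., 2.5))
# ---------------------------------------------------------------------------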
QJonny/CyNest
extras/ConnPlotter/build/lib.linux-x86_64-2.7/ConnPlotter/colormaps.py
4
7098
# ConnPlotter --- A Tool to Generate Connectivity Pattern Matrices # # This file is part of ConnPlotter. # # Copyright (C) 2009 Hans Ekkehard Plesser/UMB # # ConnPlotter is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # ConnPlotter is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with ConnPlotter. If not, see <http://www.gnu.org/licenses/>. """ Colormaps for ConnPlotter. Provides the following functions and colormaps: - make_colormap: based on color specification, create colormap running from from white to fully saturated color - redblue: from fully saturated red to white to fully saturated blue - bluered: from fully saturated blue to white to fully saturated red For all colormaps, "bad" values (NaN) are mapped to white. Provides also ZeroCenterNorm, mapping negative values to 0..0.5, positive to 0.5..1. """ # ---------------------------------------------------------------------------- __version__ = '$Revision: 503 $' __date__ = '$Date: 2009-12-02 15:13:42 +0100 (Wed, 02 Dec 2009) $' __author__ = 'Hans Ekkehard Plesser' __all__ = ['ZeroCenterNorm', 'make_colormap', 'redblue', 'bluered', 'bad_color'] # ---------------------------------------------------------------------------- import matplotlib.pyplot as plt import matplotlib.colors as mc import matplotlib.cbook as cbook import numpy as np # ---------------------------------------------------------------------------- bad_color = (1.0, 1.0, 0.9) # ---------------------------------------------------------------------------- class ZeroCenterNorm(mc.Normalize): """ Normalize so that value 0 is always at 0.5. Code from matplotlib.colors.Normalize. Copyright (c) 2002-2009 John D. Hunter; All Rights Reserved http://matplotlib.sourceforge.net/users/license.html """ # ------------------------------------------------------------------------ def __call__(self, value, clip=None): """ Normalize given values to [0,1]. Returns data in same form as passed in. value can be scalar or array. """ if clip is not None and clip is not False: assert(False) # clip not supported if cbook.iterable(value): vtype = 'array' val = np.ma.asarray(value).astype(np.float) else: vtype = 'scalar' val = np.ma.array([value]).astype(np.float) self.autoscale_None(val) self.vmin = min(0, self.vmin) self.vmax = max(0, self.vmax) # imshow expects masked arrays # fill entire array with 0.5 result = np.ma.array(0.5 * np.ma.asarray(np.ones(np.shape(val))), dtype=np.float, mask=val.mask) # change values != 0 result[val < 0] = 0.5 * (self.vmin - val[val<0]) / self.vmin result[val > 0] = 0.5 + 0.5 * val[val>0] / self.vmax if vtype == 'scalar': result = result[0] return result # ------------------------------------------------------------------------ def inverse(self, value): """ Invert color map. Required by colorbar(). 
""" if not self.scaled(): raise ValueError("Not invertible until scaled") vmin, vmax = self.vmin, self.vmax if cbook.iterable(value): val = np.asarray(value) res = np.zeros(np.shape(val)) res[val < 0.5] = vmin - 2 * vmin * val[val < 0.5] res[val > 0.5] = 2 * (val[val > 0.5]-0.5) * vmax return res else: if value == 0.5: return 0 elif value < 0.5: return vmin - 2 * vmin * value # vmin < 0 else: return 2 * (value-0.5) * vmax # ---------------------------------------------------------------------------- def make_colormap(color): """ Create LinearSegmentedColormap ranging from white to the given color. Color can be given in any legal color format. Bad color is set to white. """ try: r, g, b = mc.colorConverter.to_rgb(color) except: raise ValueError('Illegal color specification: %s' % color.__repr__) cm = mc.LinearSegmentedColormap(color.__str__(), {'red' : [(0.0, 1.0, 1.0), (1.0, r, r) ], 'green': [(0.0, 1.0, 1.0), (1.0, g, g) ], 'blue' : [(0.0, 1.0, 1.0), (1.0, b, b) ]}) cm.set_bad(color = bad_color) # light yellow return cm # ---------------------------------------------------------------------------- redblue = mc.LinearSegmentedColormap('redblue', {'red': [(0.0,0.0,1.0), (0.5,1.0,1.0), (1.0,0.0,0.0)], 'green': [(0.0,0.0,0.0), (0.5,1.0,1.0), (1.0,0.0,0.0)], 'blue': [(0.0,0.0,0.0), (0.5,1.0,1.0), (1.0,1.0,1.0)]}) redblue.set_bad(color = bad_color) # ---------------------------------------------------------------------------- bluered = mc.LinearSegmentedColormap('bluered', {'red': [(0.0,0.0,0.0), (0.5,1.0,1.0), (1.0,1.0,1.0)], 'green': [(0.0,0.0,0.0), (0.5,1.0,1.0), (1.0,0.0,0.0)], 'blue': [(0.0,1.0,1.0), (0.5,1.0,1.0), (1.0,0.0,0.0)]}) bluered.set_bad(color = bad_color) # ---------------------------------------------------------------------------- if __name__ == '__main__': # this should be proper unit tests n1 = ZeroCenterNorm() if (n1([-1, -0.5, 0.0, 0.5, 1.0]).data == np.array([0, 0.25, 0.5, 0.75, 1.0])).all(): print "n1 ok" else: print "n1 failed." n2 = ZeroCenterNorm(-1, 2) if (n2([-1, -0.5, 0.0, 1.0, 2.0]).data == np.array([0, 0.25, 0.5, 0.75, 1.0])).all(): print "n2 ok" else: print "n2 failed."
gpl-2.0
1,749,659,060,957,901,000
34.848485
89
0.434348
false
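# ---------------------------------------------------------------------------
# Illustrative sketch for the ConnPlotter colormaps module above
# (QJonny/CyNest, ConnPlotter/colormaps.py). It shows how make_colormap()
# and ZeroCenterNorm could be combined with matplotlib's imshow; the import
# path is inferred from the record's file path, and the random "kernel"
# data is made up for illustration.
import numpy as np
import matplotlib.pyplot as plt

from ConnPlotter.colormaps import make_colormap, ZeroCenterNorm, bluered

kernel = np.random.uniform(-1.0, 2.0, size=(40, 40))   # mixed-sign weights

# Single-signed data: white -> fully saturated blue.
plt.subplot(1, 2, 1)
plt.imshow(np.abs(kernel), cmap=make_colormap('blue'))
plt.colorbar()

# Mixed-sign data: ZeroCenterNorm pins 0 to the middle of the bluered map,
# so negative values shade towards blue and positive values towards red.
plt.subplot(1, 2, 2)
plt.imshow(kernel, cmap=bluered, norm=ZeroCenterNorm())
plt.colorbar()

plt.show()
# ---------------------------------------------------------------------------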
nvoron23/hue
desktop/core/ext-py/Pygments-1.3.1/pygments/formatters/bbcode.py
75
3314
# -*- coding: utf-8 -*- """ pygments.formatters.bbcode ~~~~~~~~~~~~~~~~~~~~~~~~~~ BBcode formatter. :copyright: Copyright 2006-2010 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ from pygments.formatter import Formatter from pygments.util import get_bool_opt __all__ = ['BBCodeFormatter'] class BBCodeFormatter(Formatter): """ Format tokens with BBcodes. These formatting codes are used by many bulletin boards, so you can highlight your sourcecode with pygments before posting it there. This formatter has no support for background colors and borders, as there are no common BBcode tags for that. Some board systems (e.g. phpBB) don't support colors in their [code] tag, so you can't use the highlighting together with that tag. Text in a [code] tag usually is shown with a monospace font (which this formatter can do with the ``monofont`` option) and no spaces (which you need for indentation) are removed. Additional options accepted: `style` The style to use, can be a string or a Style subclass (default: ``'default'``). `codetag` If set to true, put the output into ``[code]`` tags (default: ``false``) `monofont` If set to true, add a tag to show the code with a monospace font (default: ``false``). """ name = 'BBCode' aliases = ['bbcode', 'bb'] filenames = [] def __init__(self, **options): Formatter.__init__(self, **options) self._code = get_bool_opt(options, 'codetag', False) self._mono = get_bool_opt(options, 'monofont', False) self.styles = {} self._make_styles() def _make_styles(self): for ttype, ndef in self.style: start = end = '' if ndef['color']: start += '[color=#%s]' % ndef['color'] end = '[/color]' + end if ndef['bold']: start += '[b]' end = '[/b]' + end if ndef['italic']: start += '[i]' end = '[/i]' + end if ndef['underline']: start += '[u]' end = '[/u]' + end # there are no common BBcodes for background-color and border self.styles[ttype] = start, end def format_unencoded(self, tokensource, outfile): if self._code: outfile.write('[code]') if self._mono: outfile.write('[font=monospace]') lastval = '' lasttype = None for ttype, value in tokensource: while ttype not in self.styles: ttype = ttype.parent if ttype == lasttype: lastval += value else: if lastval: start, end = self.styles[lasttype] outfile.write(''.join((start, lastval, end))) lastval = value lasttype = ttype if lastval: start, end = self.styles[lasttype] outfile.write(''.join((start, lastval, end))) if self._mono: outfile.write('[/font]') if self._code: outfile.write('[/code]') if self._code or self._mono: outfile.write('\n')
apache-2.0
5,246,087,543,863,821,000
29.40367
78
0.539529
false
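# ---------------------------------------------------------------------------
# Illustrative sketch for the BBCodeFormatter module above (Pygments 1.3.1,
# pygments/formatters/bbcode.py). It exercises the documented options
# (style, codetag, monofont) through pygments.highlight(); the snippet being
# highlighted is arbitrary example code.
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import BBCodeFormatter

source = "def greet(name):\n    return 'hello ' + name\n"

formatter = BBCodeFormatter(style='default',  # any registered Pygments style
                            codetag=True,     # wrap output in [code]...[/code]
                            monofont=True)    # wrap output in [font=monospace]

# highlight() runs the lexer over `source` and feeds the token stream to the
# formatter, returning BBcode markup ready to paste into a bulletin board.
print(highlight(source, PythonLexer(), formatter))
# ---------------------------------------------------------------------------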
kamawanu/pydbgr
trepan/interface.py
2
2837
# -*- coding: utf-8 -*- # Copyright (C) 2010, 2013 Rocky Bernstein <[email protected]> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. """A base class for a debugger interface.""" import sys NotImplementedMessage = "This method must be overriden in a subclass" __all__ = ['DebuggerInterface'] class DebuggerInterface: """ A debugger interface handles the communication or interaction with between the program and the outside portion which could be - a user, - a front-end that talks to a user, or - another interface in another process or computer """ def __init__(self, inp=None, out=None): self.input = inp or sys.stdin self.output = out or sys.stdout self.interactive = False return def close(self): """ Closes all input and/or output """ raise NotImplementedError, NotImplementedMessage return def confirm(self, prompt, default): """ Called when a dangerous action is about to be done to make sure it's okay. `prompt' is printed; user response is returned.""" raise NotImplementedError, NotImplementedMessage def errmsg(self, str, prefix="** "): """Common routine for reporting debugger error messages. """ raise NotImplementedError, NotImplementedMessage def finalize(self, last_wishes=None): raise NotImplementedError, NotImplementedMessage def msg(self, msg): """ used to write to a debugger that is connected to this server; `str' written will have a newline added to it """ if hasattr(self.output, 'writeline'): self.output.writeline(msg) elif hasattr(self.output, 'writelines'): self.output.writelines(msg + "\n") pass return def msg_nocr(self, msg): """ used to write to a debugger that is connected to this server; `str' written will not have a newline added to it """ self.output.write(msg) return def read_command(self, prompt): raise NotImplementedError, NotImplementedMessage def readline(self, prompt, add_to_history=True): raise NotImplementedError, NotImplementedMessage pass
gpl-3.0
7,304,066,626,211,151,000
34.024691
75
0.667254
false
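# ---------------------------------------------------------------------------
# Illustrative sketch for the DebuggerInterface base class above
# (kamawanu/pydbgr, trepan/interface.py): a minimal concrete interface in
# which every NotImplementedError method is overridden. The ScriptedInterface
# name, the canned-command idea and the fallback 'quit' command are invented
# for the example, and it assumes the (Python 2-era) trepan package is
# importable.
from trepan.interface import DebuggerInterface


class ScriptedInterface(DebuggerInterface):
    """A non-interactive interface that replays canned commands."""

    def __init__(self, commands, out=None):
        DebuggerInterface.__init__(self, out=out)
        self.commands = list(commands)   # e.g. ['step', 'where', 'quit']

    def close(self):
        pass                             # nothing to close for canned input

    def confirm(self, prompt, default):
        return default                   # never block on a question

    def errmsg(self, msg, prefix="** "):
        self.msg(prefix + msg)

    def finalize(self, last_wishes=None):
        self.close()

    def read_command(self, prompt):
        return self.readline(prompt)

    def readline(self, prompt, add_to_history=True):
        if self.commands:
            return self.commands.pop(0)
        return 'quit'                    # fall back to quitting the debugger
# ---------------------------------------------------------------------------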
2ndQuadrant/ansible
lib/ansible/modules/network/iosxr/iosxr_logging.py
45
30474
#!/usr/bin/python # -*- coding: utf-8 -*- # (c) 2017, Ansible by Red Hat, inc # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'network'} DOCUMENTATION = """ --- module: iosxr_logging version_added: "2.4" author: - "Trishna Guha (@trishnaguha)" - "Kedar Kekan (@kedarX)" short_description: Configuration management of system logging services on network devices description: - This module provides declarative management configuration of system logging (syslog) on Cisco IOS XR devices. notes: - Tested against IOS XRv 6.1.2 options: dest: description: - Destination for system logging (syslog) messages. choices: ['host', 'console', 'monitor', 'buffered', 'file'] name: description: - When C(dest) = I(file) name indicates file-name - When C(dest) = I(host) name indicates the host-name or ip-address of syslog server. vrf: description: - vrf name when syslog server is configured, C(dest) = C(host) default: default version_added: 2.5 size: description: - Size of buffer when C(dest) = C(buffered). The acceptable value is in the range I(307200 to 125000000 bytes). Default 307200 - Size of file when C(dest) = C(file). The acceptable value is in the range I(1 to 2097152)KB. Default 2 GB facility: description: - To configure the type of syslog facility in which system logging (syslog) messages are sent to syslog servers Optional config for C(dest) = C(host) default: local7 hostnameprefix: description: - To append a hostname prefix to system logging (syslog) messages logged to syslog servers. Optional config for C(dest) = C(host) version_added: 2.5 level: description: - Specifies the severity level for the logging. default: debugging aliases: ['severity'] aggregate: description: List of syslog logging configuration definitions. state: description: - Existential state of the logging configuration on the node. 
default: present choices: ['present', 'absent'] extends_documentation_fragment: iosxr """ EXAMPLES = """ - name: configure logging for syslog server host iosxr_logging: dest: host name: 10.10.10.1 level: critical state: present - name: add hostnameprefix configuration iosxr_logging: hostnameprefix: host1 state: absent - name: add facility configuration iosxr_logging: facility: local1 state: present - name: configure console logging level iosxr_logging: dest: console level: debugging state: present - name: configure monitor logging level iosxr_logging: dest: monitor level: errors state: present - name: configure syslog to a file iosxr_logging: dest: file name: file_name size: 2048 level: errors state: present - name: configure buffered logging with size iosxr_logging: dest: buffered size: 5100000 - name: Configure logging using aggregate iosxr_logging: aggregate: - { dest: console, level: warning } - { dest: buffered, size: 4800000 } - { dest: file, name: file3, size: 2048} - { dest: host, name: host3, level: critical} - name: Delete logging using aggregate iosxr_logging: aggregate: - { dest: console, level: warning } - { dest: buffered, size: 4800000 } - { dest: file, name: file3, size: 2048} - { dest: host, name: host3, level: critical} state: absent """ RETURN = """ commands: description: The list of configuration mode commands to send to the device returned: always (empty list when no commands to send) type: list sample: - logging 10.10.10.1 vrf default severity debugging - logging facility local7 - logging hostnameprefix host1 - logging console critical - logging buffered 2097153 - logging buffered warnings - logging monitor errors - logging file log_file maxfilesize 1024 severity info xml: description: NetConf rpc xml sent to device with transport C(netconf) returned: always (empty list when no xml rpc to send) type: list version_added: 2.5 sample: - '<config xmlns:xc="urn:ietf:params:xml:ns:netconf:base:1.0"> <syslog xmlns="http://cisco.com/ns/yang/Cisco-IOS-XR-infra-syslog-cfg"> <files> <file xc:operation="delete"> <file-name>file1</file-name> <file-log-attributes> <max-file-size>2097152</max-file-size> <severity>2</severity> </file-log-attributes> </file> </files> </syslog> </config>' """ import re import collections from copy import deepcopy from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.network.iosxr.iosxr import get_config, load_config, build_xml from ansible.module_utils.network.iosxr.iosxr import iosxr_argument_spec, etree_findall from ansible.module_utils.network.iosxr.iosxr import is_netconf, is_cliconf, etree_find from ansible.module_utils.network.common.utils import remove_default_spec severity_level = {'emergency': '0', 'alert': '1', 'critical': '2', 'error': '3', 'warning': '4', 'notice': '5', 'info': '6', 'debug': '7', 'disable': '15'} severity_transpose = {'emergencies': 'emergency', 'alerts': 'alert', 'critical': 'critical', 'errors': 'error', 'warning': 'warning', 'notifications': 'notice', 'informational': 'info', 'debugging': 'debug'} class ConfigBase(object): def __init__(self, module): self._flag = None self._module = module self._result = {'changed': False, 'warnings': []} self._want = list() self._have = list() def validate_size(self, value, type=None): if value: if type == 'buffer': if value and not int(307200) <= value <= int(125000000): self._module.fail_json(msg='buffer size must be between 307200 and 125000000') elif type == 'file': if value and not int(1) <= value <= int(2097152): self._module.fail_json(msg='file size must 
be between 1 and 2097152') return value def map_params_to_obj(self, required_if=None): aggregate = self._module.params.get('aggregate') if aggregate: for item in aggregate: for key in item: if item.get(key) is None: item[key] = self._module.params[key] d = item.copy() if d['dest'] not in ('host', 'file'): d['name'] = None if d['dest'] == 'buffered': if d['size'] is not None: d['size'] = str(self.validate_size(d['size'], 'buffer')) else: d['size'] = str(307200) elif d['dest'] == 'file': if d['size'] is not None: d['size'] = str(self.validate_size(d['size'], 'file')) else: d['size'] = str(2097152) else: d['size'] = None if self._flag == 'NC': d['level'] = severity_transpose[d['level']] self._want.append(d) else: params = self._module.params if params['dest'] not in ('host', 'file'): params['name'] = None if params['dest'] == 'buffered': if params['size'] is not None: params['size'] = str(self.validate_size(params['size'], 'buffer')) else: params['size'] = str(307200) elif params['dest'] == 'file': if params['size'] is not None: params['size'] = str(self.validate_size(params['size'], 'file')) else: params['size'] = str(2097152) else: params['size'] = None if self._flag == 'NC': params['level'] = severity_transpose[params['level']] self._want.append({ 'dest': params['dest'], 'name': params['name'], 'vrf': params['vrf'], 'size': params['size'], 'facility': params['facility'], 'level': params['level'], 'hostnameprefix': params['hostnameprefix'], 'state': params['state'] }) class CliConfiguration(ConfigBase): def __init__(self, module): super(CliConfiguration, self).__init__(module) self._file_list = set() self._host_list = set() def map_obj_to_commands(self): commands = list() for want_item in self._want: dest = want_item['dest'] name = want_item['name'] size = want_item['size'] facility = want_item['facility'] level = want_item['level'] vrf = want_item['vrf'] hostnameprefix = want_item['hostnameprefix'] state = want_item['state'] del want_item['state'] have_size = None have_console_level = None have_monitor_level = None have_prefix = None have_facility = None for item in self._have: if item['dest'] == 'buffered': have_size = item['size'] if item['dest'] == 'console': have_console_level = item['level'] if item['dest'] == 'monitor': have_monitor_level = item['level'] if item['dest'] is None and item['hostnameprefix'] is not None: have_prefix = item['hostnameprefix'] if item['dest'] is None and item['hostnameprefix'] is None and item['facility'] is not None: have_facility = item['facility'] if state == 'absent': if dest == 'host' and name in self._host_list: commands.append('no logging {0} vrf {1}'.format(name, vrf)) elif dest == 'file' and name in self._file_list: commands.append('no logging file {0}'.format(name)) elif dest == 'console' and have_console_level is not None: commands.append('no logging {0}'.format(dest)) elif dest == 'monitor' and have_monitor_level: commands.append('no logging {0}'.format(dest)) elif dest == 'buffered' and have_size: commands.append('no logging {0}'.format(dest)) if dest is None and hostnameprefix is not None and have_prefix == hostnameprefix: commands.append('no logging hostnameprefix {0}'.format(hostnameprefix)) if dest is None and facility is not None and have_facility == facility: commands.append('no logging facility {0}'.format(facility)) if state == 'present': if dest == 'host' and name not in self._host_list: if level == 'errors' or level == 'informational': level = severity_transpose[level] commands.append('logging {0} vrf {1} severity {2}'.format(name, 
vrf, level)) elif dest == 'file' and name not in self._file_list: if level == 'errors' or level == 'informational': level = severity_transpose[level] commands.append('logging file {0} maxfilesize {1} severity {2}'.format(name, size, level)) elif dest == 'buffered' and (have_size is None or (have_size is not None and size != have_size)): commands.append('logging buffered {0}'.format(size)) elif dest == 'console' and (have_console_level is None or (have_console_level is not None and have_console_level != level)): commands.append('logging console {0}'.format(level)) elif dest == 'monitor' and (have_monitor_level is None or (have_monitor_level is not None and have_monitor_level != level)): commands.append('logging monitor {0}'.format(level)) if dest is None and hostnameprefix is not None and (have_prefix is None or (have_prefix is not None and hostnameprefix != have_prefix)): commands.append('logging hostnameprefix {0}'.format(hostnameprefix)) if dest is None and hostnameprefix is None and facility != have_facility: commands.append('logging facility {0}'.format(facility)) self._result['commands'] = commands if commands: commit = not self._module.check_mode diff = load_config(self._module, commands, commit=commit) if diff: self._result['diff'] = dict(prepared=diff) self._result['changed'] = True def parse_facility(self, line): match = re.search(r'logging facility (\S+)', line, re.M) facility = None if match: facility = match.group(1) return facility def parse_size(self, line, dest): size = None if dest == 'buffered': match = re.search(r'logging buffered (\S+)', line, re.M) if match: try: int_size = int(match.group(1)) except ValueError: int_size = None if int_size is not None: if isinstance(int_size, int): size = str(match.group(1)) return size def parse_hostnameprefix(self, line): prefix = None match = re.search(r'logging hostnameprefix (\S+)', line, re.M) if match: prefix = match.group(1) return prefix def parse_name(self, line, dest): name = None if dest == 'file': match = re.search(r'logging file (\S+)', line, re.M) if match: name = match.group(1) elif dest == 'host': match = re.search(r'logging (\S+)', line, re.M) if match: name = match.group(1) return name def parse_level(self, line, dest): level_group = ('emergencies', 'alerts', 'critical', 'errors', 'warning', 'notifications', 'informational', 'debugging') level = None match = re.search(r'logging {0} (\S+)'.format(dest), line, re.M) if match: if match.group(1) in level_group: level = match.group(1) return level def parse_dest(self, line, group): dest_group = ('console', 'monitor', 'buffered', 'file') dest = None if group in dest_group: dest = group elif 'vrf' in line: dest = 'host' return dest def parse_vrf(self, line, dest): vrf = None if dest == 'host': match = re.search(r'logging (\S+) vrf (\S+)', line, re.M) if match: vrf = match.group(2) return vrf def map_config_to_obj(self): data = get_config(self._module, config_filter='logging') lines = data.split("\n") for line in lines: match = re.search(r'logging (\S+)', line, re.M) if match: dest = self.parse_dest(line, match.group(1)) name = self.parse_name(line, dest) if dest == 'host' and name is not None: self._host_list.add(name) if dest == 'file' and name is not None: self._file_list.add(name) self._have.append({ 'dest': dest, 'name': name, 'size': self.parse_size(line, dest), 'facility': self.parse_facility(line), 'level': self.parse_level(line, dest), 'vrf': self.parse_vrf(line, dest), 'hostnameprefix': self.parse_hostnameprefix(line), }) def run(self): self.map_params_to_obj() 
self.map_config_to_obj() self.map_obj_to_commands() return self._result class NCConfiguration(ConfigBase): def __init__(self, module): super(NCConfiguration, self).__init__(module) self._flag = 'NC' self._log_file_meta = collections.OrderedDict() self._log_host_meta = collections.OrderedDict() self._log_console_meta = collections.OrderedDict() self._log_monitor_meta = collections.OrderedDict() self._log_buffered_size_meta = collections.OrderedDict() self._log_buffered_level_meta = collections.OrderedDict() self._log_facility_meta = collections.OrderedDict() self._log_prefix_meta = collections.OrderedDict() def map_obj_to_xml_rpc(self): self._log_file_meta.update([ ('files', {'xpath': 'syslog/files', 'tag': True, 'operation': 'edit'}), ('file', {'xpath': 'syslog/files/file', 'tag': True, 'operation': 'edit', 'attrib': "operation"}), ('a:name', {'xpath': 'syslog/files/file/file-name', 'operation': 'edit'}), ('file-attrib', {'xpath': 'syslog/files/file/file-log-attributes', 'tag': True, 'operation': 'edit'}), ('a:size', {'xpath': 'syslog/files/file/file-log-attributes/max-file-size', 'operation': 'edit'}), ('a:level', {'xpath': 'syslog/files/file/file-log-attributes/severity', 'operation': 'edit'}), ]) self._log_host_meta.update([ ('host-server', {'xpath': 'syslog/host-server', 'tag': True, 'operation': 'edit'}), ('vrfs', {'xpath': 'syslog/host-server/vrfs', 'tag': True, 'operation': 'edit'}), ('vrf', {'xpath': 'syslog/host-server/vrfs/vrf', 'tag': True, 'operation': 'edit'}), ('a:vrf', {'xpath': 'syslog/host-server/vrfs/vrf/vrf-name', 'operation': 'edit'}), ('ipv4s', {'xpath': 'syslog/host-server/vrfs/vrf/ipv4s', 'tag': True, 'operation': 'edit'}), ('ipv4', {'xpath': 'syslog/host-server/vrfs/vrf/ipv4s/ipv4', 'tag': True, 'operation': 'edit', 'attrib': "operation"}), ('a:name', {'xpath': 'syslog/host-server/vrfs/vrf/ipv4s/ipv4/address', 'operation': 'edit'}), ('ipv4-sev', {'xpath': 'syslog/host-server/vrfs/vrf/ipv4s/ipv4/ipv4-severity-port', 'tag': True, 'operation': 'edit'}), ('a:level', {'xpath': 'syslog/host-server/vrfs/vrf/ipv4s/ipv4/ipv4-severity-port/severity', 'operation': 'edit'}), ]) self._log_console_meta.update([ ('a:enable-console', {'xpath': 'syslog/enable-console-logging', 'operation': 'edit', 'attrib': "operation"}), ('console', {'xpath': 'syslog/console-logging', 'tag': True, 'operation': 'edit', 'attrib': "operation"}), ('a:console-level', {'xpath': 'syslog/console-logging/logging-level', 'operation': 'edit'}), ]) self._log_monitor_meta.update([ ('monitor', {'xpath': 'syslog/monitor-logging', 'tag': True, 'operation': 'edit', 'attrib': "operation"}), ('a:monitor-level', {'xpath': 'syslog/monitor-logging/logging-level', 'operation': 'edit'}), ]) self._log_buffered_size_meta.update([ ('buffered', {'xpath': 'syslog/buffered-logging', 'tag': True, 'operation': 'edit', 'attrib': "operation"}), ('a:size', {'xpath': 'syslog/buffered-logging/buffer-size', 'operation': 'edit'}), ]) self._log_buffered_level_meta.update([ ('buffered', {'xpath': 'syslog/buffered-logging', 'tag': True, 'operation': 'edit', 'attrib': "operation"}), ('a:level', {'xpath': 'syslog/buffered-logging/logging-level', 'operation': 'edit'}), ]) self._log_facility_meta.update([ ('facility', {'xpath': 'syslog/logging-facilities', 'tag': True, 'operation': 'edit', 'attrib': "operation"}), ('a:facility', {'xpath': 'syslog/logging-facilities/facility-level', 'operation': 'edit'}), ]) self._log_prefix_meta.update([ ('a:hostnameprefix', {'xpath': 'syslog/host-name-prefix', 'operation': 'edit', 'attrib': "operation"}), ]) 
state = self._module.params['state'] _get_filter = build_xml('syslog', opcode="filter") running = get_config(self._module, source='running', config_filter=_get_filter) file_ele = etree_findall(running, 'file') file_list = list() if len(file_ele): for file in file_ele: file_name = etree_find(file, 'file-name') file_list.append(file_name.text if file_name is not None else None) vrf_ele = etree_findall(running, 'vrf') host_list = list() for vrf in vrf_ele: host_ele = etree_findall(vrf, 'ipv4') for host in host_ele: host_name = etree_find(host, 'address') host_list.append(host_name.text if host_name is not None else None) console_ele = etree_find(running, 'console-logging') console_level = etree_find(console_ele, 'logging-level') if console_ele is not None else None have_console = console_level.text if console_level is not None else None monitor_ele = etree_find(running, 'monitor-logging') monitor_level = etree_find(monitor_ele, 'logging-level') if monitor_ele is not None else None have_monitor = monitor_level.text if monitor_level is not None else None buffered_ele = etree_find(running, 'buffered-logging') buffered_size = etree_find(buffered_ele, 'buffer-size') if buffered_ele is not None else None have_buffered = buffered_size.text if buffered_size is not None else None facility_ele = etree_find(running, 'logging-facilities') facility_level = etree_find(facility_ele, 'facility-level') if facility_ele is not None else None have_facility = facility_level.text if facility_level is not None else None prefix_ele = etree_find(running, 'host-name-prefix') have_prefix = prefix_ele.text if prefix_ele is not None else None console_enable_ele = etree_find(running, 'enable-console-logging') have_console_enable = console_enable_ele.text if console_enable_ele is not None else None file_params = list() host_params = list() console_params = dict() monitor_params = dict() buffered_params = dict() facility_params = dict() prefix_params = dict() opcode = None if state == 'absent': opcode = "delete" for item in self._want: if item['dest'] == 'file' and item['name'] in file_list: item['level'] = severity_level[item['level']] file_params.append(item) elif item['dest'] == 'host' and item['name'] in host_list: item['level'] = severity_level[item['level']] host_params.append(item) elif item['dest'] == 'console' and have_console: console_params.update({'console-level': item['level']}) elif item['dest'] == 'monitor' and have_monitor: monitor_params.update({'monitor-level': item['level']}) elif item['dest'] == 'buffered' and have_buffered: buffered_params['size'] = str(item['size']) if item['size'] else None buffered_params['level'] = item['level'] if item['level'] else None elif item['dest'] is None and item['hostnameprefix'] is None and \ item['facility'] is not None and have_facility: facility_params.update({'facility': item['facility']}) elif item['dest'] is None and item['hostnameprefix'] is not None and have_prefix: prefix_params.update({'hostnameprefix': item['hostnameprefix']}) elif state == 'present': opcode = 'merge' for item in self._want: if item['dest'] == 'file': item['level'] = severity_level[item['level']] file_params.append(item) elif item['dest'] == 'host': item['level'] = severity_level[item['level']] host_params.append(item) elif item['dest'] == 'console': console_params.update({'console-level': item['level']}) elif item['dest'] == 'monitor': monitor_params.update({'monitor-level': item['level']}) elif item['dest'] == 'buffered': buffered_params['size'] = str(item['size']) if item['size'] else None 
buffered_params['level'] = item['level'] if item['level'] else None elif item['dest'] is None and item['hostnameprefix'] is None and \ item['facility'] is not None: facility_params.update({'facility': item['facility']}) elif item['dest'] is None and item['hostnameprefix'] is not None: prefix_params.update({'hostnameprefix': item['hostnameprefix']}) self._result['xml'] = [] _edit_filter_list = list() if opcode: if len(file_params): _edit_filter_list.append(build_xml('syslog', xmap=self._log_file_meta, params=file_params, opcode=opcode)) if len(host_params): _edit_filter_list.append(build_xml('syslog', xmap=self._log_host_meta, params=host_params, opcode=opcode)) if len(console_params): _edit_filter_list.append(build_xml('syslog', xmap=self._log_console_meta, params=console_params, opcode=opcode)) if len(monitor_params): _edit_filter_list.append(build_xml('syslog', xmap=self._log_monitor_meta, params=monitor_params, opcode=opcode)) if len(buffered_params): _edit_filter_list.append(build_xml('syslog', xmap=self._log_buffered_size_meta, params=buffered_params, opcode=opcode)) _edit_filter_list.append(build_xml('syslog', xmap=self._log_buffered_level_meta, params=buffered_params, opcode=opcode)) if len(facility_params): _edit_filter_list.append(build_xml('syslog', xmap=self._log_facility_meta, params=facility_params, opcode=opcode)) if len(prefix_params): _edit_filter_list.append(build_xml('syslog', xmap=self._log_prefix_meta, params=prefix_params, opcode=opcode)) diff = None if len(_edit_filter_list): commit = not self._module.check_mode diff = load_config(self._module, _edit_filter_list, commit=commit, running=running, nc_get_filter=_get_filter) if diff: if self._module._diff: self._result['diff'] = dict(prepared=diff) self._result['xml'] = _edit_filter_list self._result['changed'] = True def run(self): self.map_params_to_obj() self.map_obj_to_xml_rpc() return self._result def main(): """ main entry point for module execution """ element_spec = dict( dest=dict(type='str', choices=['host', 'console', 'monitor', 'buffered', 'file']), name=dict(type='str'), size=dict(type='int'), vrf=dict(type='str', default='default'), facility=dict(type='str', default='local7'), hostnameprefix=dict(type='str'), level=dict(type='str', default='informational', aliases=['severity'], choices=['emergencies', 'alerts', 'critical', 'errors', 'warning', 'notifications', 'informational', 'debugging']), state=dict(default='present', choices=['present', 'absent']), ) aggregate_spec = deepcopy(element_spec) # remove default in aggregate spec, to handle common arguments remove_default_spec(aggregate_spec) mutually_exclusive = [('dest', 'facility', 'hostnameprefix')] required_if = [('dest', 'host', ['name']), ('dest', 'file', ['name']), ('dest', 'buffered', ['size']), ('dest', 'console', ['level']), ('dest', 'monitor', ['level'])] argument_spec = dict( aggregate=dict(type='list', elements='dict', options=aggregate_spec, mutually_exclusive=mutually_exclusive, required_if=required_if), ) argument_spec.update(element_spec) argument_spec.update(iosxr_argument_spec) module = AnsibleModule(argument_spec=argument_spec, mutually_exclusive=mutually_exclusive, required_if=required_if, supports_check_mode=True) config_object = None if is_cliconf(module): module.deprecate(msg="cli support for 'iosxr_logging' is deprecated. 
Use transport netconf instead", version="2.9") config_object = CliConfiguration(module) elif is_netconf(module): config_object = NCConfiguration(module) if config_object: result = config_object.run() module.exit_json(**result) if __name__ == '__main__': main()
gpl-3.0
-598,276,708,112,361,100
40.975207
132
0.546269
false
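# ---------------------------------------------------------------------------
# Illustrative sketch for the iosxr_logging module above
# (lib/ansible/modules/network/iosxr/iosxr_logging.py). It only demonstrates
# how the module's two lookup tables cooperate on the netconf code path: the
# user-facing `level` choice (e.g. 'errors') is first normalised by
# severity_transpose and then converted to the numeric severity carried in
# the NETCONF payload via severity_level. The dictionaries are copied
# verbatim from the module so the snippet runs stand-alone.
severity_level = {'emergency': '0', 'alert': '1', 'critical': '2',
                  'error': '3', 'warning': '4', 'notice': '5',
                  'info': '6', 'debug': '7', 'disable': '15'}

severity_transpose = {'emergencies': 'emergency', 'alerts': 'alert',
                      'critical': 'critical', 'errors': 'error',
                      'warning': 'warning', 'notifications': 'notice',
                      'informational': 'info', 'debugging': 'debug'}

for user_level in ('emergencies', 'errors', 'informational', 'debugging'):
    yang_level = severity_transpose[user_level]
    print('%-13s -> %-9s -> severity %s'
          % (user_level, yang_level, severity_level[yang_level]))
# e.g. 'errors' -> 'error' -> severity 3
# ---------------------------------------------------------------------------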
google-research/mma
libml/data_pair.py
1
2663
# coding=utf-8 # Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import itertools from absl import flags import numpy as np import tensorflow as tf from libml import data from libml.data import DynamicDataset, augment_cifar10, augment_svhn_extra FLAGS = flags.FLAGS flags.DEFINE_integer('nu', 2, 'Number of augmentations for class-consistency.') def stack_augment(augment): def func(x): xl = [augment(x) for _ in range(FLAGS.nu)] return dict(image=tf.stack([x['image'] for x in xl]), label=tf.stack([x['label'] for x in xl])) return func DATASETS = {} # ------ cifar10 ------ # DATASETS.update([DynamicDataset.creator_small_data('cifar10', seed, train, label, [augment_cifar10, stack_augment(augment_cifar10)]) for seed, train, label in itertools.product(range(10), range(5000, 50001, 5000), [250])]) # ------ cifar100 ------ # DATASETS.update([DynamicDataset.creator_small_data('cifar100', seed, train, label, [augment_cifar10, stack_augment(augment_cifar10)], nclass=100) for seed, train, label in itertools.product(range(10), range(5000, 50001, 5000), [2500])]) # ------ svhn ------ # DATASETS.update([DynamicDataset.creator_small_data('svhn', seed, train, label, [augment_svhn_extra, stack_augment(augment_svhn_extra)], sampc=True) for seed, train, label in itertools.product([1,2,3,4,5], [5000, 10000, 20000, 50000, 73257], [250, 25000])]) # ------ svhn_extra ------ # DATASETS.update([DynamicDataset.creator_small_data('svhn_extra', seed, train, label, [augment_svhn_extra, stack_augment(augment_svhn_extra)], sampc=True) for seed, train, label in itertools.product([1,2,3,4,5], [5000, 10000, 20000, 50000, 100000, 200000, 400000, 604388], [250])])
apache-2.0
7,481,708,591,580,205,000
39.969231
117
0.593316
false
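# ---------------------------------------------------------------------------
# Illustrative sketch for libml/data_pair.py above (google-research/mma).
# stack_augment() applies the same augmentation `nu` times to one labelled
# example and stacks the results, which is what the class-consistency loss
# consumes. The stand-alone rewrite below hardcodes nu=2 and uses a random
# left/right flip so it runs without the absl FLAGS machinery or the libml
# package; the function names and the 32x32x3 dummy example are hypothetical.
import tensorflow as tf


def stack_augment_demo(augment, nu=2):
    def func(x):
        xl = [augment(x) for _ in range(nu)]
        return dict(image=tf.stack([v['image'] for v in xl]),
                    label=tf.stack([v['label'] for v in xl]))
    return func


def flip_augment(x):
    return dict(image=tf.image.random_flip_left_right(x['image']),
                label=x['label'])


example = dict(image=tf.zeros([32, 32, 3]), label=tf.constant(1))
pair = stack_augment_demo(flip_augment)(example)
# pair['image'] has shape [2, 32, 32, 3]; pair['label'] has shape [2].
# ---------------------------------------------------------------------------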
rlr/fjord
vendor/packages/translate-toolkit/translate/lang/test_common.py
3
5288
#!/usr/bin/env python # -*- coding: utf-8 -*- from translate.lang import common from pytest import mark def test_characters(): """Test the basic characters segmentation""" language = common.Common assert language.characters(u"") == [] assert language.characters(u"Four") == [u"F", u"o", u"u", u"r"] assert language.characters(u"A B") == [u"A", u" ", u"B"] # Spaces are compacted, source has 2 returned has only one assert language.characters(u"A B") == [u"A", u" ", u"B"] def test_words(): """Tests basic functionality of word segmentation.""" language = common.Common words = language.words(u"") assert words == [] words = language.words(u"test sentence.") assert words == [u"test", u"sentence"] words = language.words(u"This is a weird test .") assert words == [u"This", u"is", u"a", u"weird", u"test"] words = language.words(u"Don't send e-mail!") assert words == [u"Don't", u"send", u"e-mail"] words = language.words(u"Don’t send e-mail!") assert words == [u"Don’t", u"send", u"e-mail"] @mark.xfail("sys.version_info >= (2, 6)", reason="ZWS " "is not considered a space in Python 2.6+. Khmer should extend " "words() to include \\u200b in addition to other word breakers.") def test_word_khmer(): language = common.Common # Let's test Khmer with zero width space (\u200b) words = language.words(u"ផ្ដល់​យោបល់") print u"ផ្ដល់​យោបល់" print language.words(u"ផ្ដល់<200b>យោបល់") print [u"ផ្ដល់", u"យោបល់"] assert words == [u"ផ្ដល់", u"យោបល់"] def test_sentences(): """Tests basic functionality of sentence segmentation.""" language = common.Common # Check that we correctly handle an empty string: sentences = language.sentences(u"") sentences = language.sentences(u"This is a sentence.") assert sentences == [u"This is a sentence."] sentences = language.sentences(u"This is a sentence") assert sentences == [u"This is a sentence"] sentences = language.sentences(u"This is a sentence. Another one.") assert sentences == [u"This is a sentence.", u"Another one."] sentences = language.sentences(u"This is a sentence. Another one. Bla.") assert sentences == [u"This is a sentence.", u"Another one.", u"Bla."] sentences = language.sentences(u"This is a sentence.Not another one.") assert sentences == [u"This is a sentence.Not another one."] sentences = language.sentences(u"Exclamation! Really? No...") assert sentences == [u"Exclamation!", u"Really?", u"No..."] sentences = language.sentences(u"Four i.e. 1+3. See?") assert sentences == [u"Four i.e. 1+3.", u"See?"] sentences = language.sentences(u"Apples, bananas, etc. are nice.") assert sentences == [u"Apples, bananas, etc. are nice."] sentences = language.sentences(u"Apples, bananas, etc.\nNext part") assert sentences == [u"Apples, bananas, etc.", u"Next part"] sentences = language.sentences(u"No font for displaying text in encoding '%s' found,\nbut an alternative encoding '%s' is available.\nDo you want to use this encoding (otherwise you will have to choose another one)?") assert sentences == [u"No font for displaying text in encoding '%s' found,\nbut an alternative encoding '%s' is available.", u"Do you want to use this encoding (otherwise you will have to choose another one)?"] # Test that a newline at the end won't confuse us sentences = language.sentences(u"The first sentence. The second sentence.\n") assert sentences == [u"The first sentence.", u"The second sentence."] sentences = language.sentences(u"P.O. box") assert sentences == [u"P.O. box"] sentences = language.sentences(u"Doen dit d.m.v. koeie.") assert sentences == [u"Doen dit d.m.v. 
koeie."] def test_capsstart(): """Tests for basic sane behaviour in startcaps().""" language = common.Common assert language.capsstart("Open cow file") assert language.capsstart("'Open' cow file") assert not language.capsstart("open cow file") assert not language.capsstart(":") assert not language.capsstart("") def test_numstart(): """Tests for basic sane behaviour in startcaps().""" language = common.Common assert language.numstart("360 degress") assert language.numstart("3D file") assert not language.numstart("Open 360 degrees") assert not language.numstart(":") assert not language.numstart("") def test_punctranslate(): """Test the basic punctranslate function""" language = common.Common assert not language.punctranslate(u"A...") == u"A…" language.puncdict = {u"...": u"…"} assert language.punctranslate(u"A...") == u"A…" def test_length_difference(): """Test the heuristics of the length difference function""" # Expansion with no code assert common.Common.length_difference(10) == 6 assert common.Common.length_difference(100) == 15 assert common.Common.length_difference(300) == 35 def test_alter_length(): """Test that we create the correct length by adding or removing characters""" assert common.Common.alter_length("One two three") == "One twOne two three"
bsd-3-clause
-8,005,650,521,094,000,000
40.392
221
0.661384
false
rajegannathan/grasp-lift-eeg-cat-dog-solution-updated
python-packages/mne-python-0.10/mne/io/tests/test_reference.py
3
13025
# Authors: Marijn van Vliet <[email protected]> # Alexandre Gramfort <[email protected]> # Teon Brooks <[email protected]> # # License: BSD (3-clause) import warnings import os.path as op import numpy as np from nose.tools import assert_true, assert_equal, assert_raises from numpy.testing import assert_array_equal, assert_allclose from mne import pick_types, Evoked, Epochs, read_events from mne.io.constants import FIFF from mne.io import (set_eeg_reference, set_bipolar_reference, add_reference_channels) from mne.io.proj import _has_eeg_average_ref_proj from mne.io.reference import _apply_reference from mne.datasets import testing from mne.io import Raw warnings.simplefilter('always') # enable b/c these tests throw warnings data_dir = op.join(testing.data_path(download=False), 'MEG', 'sample') fif_fname = op.join(data_dir, 'sample_audvis_trunc_raw.fif') eve_fname = op.join(data_dir, 'sample_audvis_trunc_raw-eve.fif') ave_fname = op.join(data_dir, 'sample_audvis_trunc-ave.fif') def _test_reference(raw, reref, ref_data, ref_from): """Helper function to test whether a reference has been correctly applied.""" # Separate EEG channels from other channel types picks_eeg = pick_types(raw.info, meg=False, eeg=True, exclude='bads') picks_other = pick_types(raw.info, meg=True, eeg=False, eog=True, stim=True, exclude='bads') # Calculate indices of reference channesl picks_ref = [raw.ch_names.index(ch) for ch in ref_from] # Get data if isinstance(raw, Evoked): _data = raw.data _reref = reref.data else: _data = raw._data _reref = reref._data # Check that the ref has been properly computed assert_array_equal(ref_data, _data[..., picks_ref, :].mean(-2)) # Get the raw EEG data and other channel data raw_eeg_data = _data[..., picks_eeg, :] raw_other_data = _data[..., picks_other, :] # Get the rereferenced EEG data reref_eeg_data = _reref[..., picks_eeg, :] reref_other_data = _reref[..., picks_other, :] # Undo rereferencing of EEG channels if isinstance(raw, Epochs): unref_eeg_data = reref_eeg_data + ref_data[:, np.newaxis, :] else: unref_eeg_data = reref_eeg_data + ref_data # Check that both EEG data and other data is the same assert_allclose(raw_eeg_data, unref_eeg_data, 1e-6, atol=1e-15) assert_allclose(raw_other_data, reref_other_data, 1e-6, atol=1e-15) @testing.requires_testing_data def test_apply_reference(): """Test base function for rereferencing""" raw = Raw(fif_fname, preload=True) # Rereference raw data by creating a copy of original data reref, ref_data = _apply_reference(raw, ref_from=['EEG 001', 'EEG 002'], copy=True) assert_true(reref.info['custom_ref_applied']) _test_reference(raw, reref, ref_data, ['EEG 001', 'EEG 002']) # The CAR reference projection should have been removed by the function assert_true(not _has_eeg_average_ref_proj(reref.info['projs'])) # Test that disabling the reference does not break anything reref, ref_data = _apply_reference(raw, []) assert_array_equal(raw._data, reref._data) # Test that data is modified in place when copy=False reref, ref_data = _apply_reference(raw, ['EEG 001', 'EEG 002'], copy=False) assert_true(raw is reref) # Test re-referencing Epochs object raw = Raw(fif_fname, preload=False, add_eeg_ref=False) events = read_events(eve_fname) picks_eeg = pick_types(raw.info, meg=False, eeg=True) epochs = Epochs(raw, events=events, event_id=1, tmin=-0.2, tmax=0.5, picks=picks_eeg, preload=True) reref, ref_data = _apply_reference(epochs, ref_from=['EEG 001', 'EEG 002'], copy=True) assert_true(reref.info['custom_ref_applied']) _test_reference(epochs, reref, 
ref_data, ['EEG 001', 'EEG 002']) # Test re-referencing Evoked object evoked = epochs.average() reref, ref_data = _apply_reference(evoked, ref_from=['EEG 001', 'EEG 002'], copy=True) assert_true(reref.info['custom_ref_applied']) _test_reference(evoked, reref, ref_data, ['EEG 001', 'EEG 002']) # Test invalid input raw_np = Raw(fif_fname, preload=False) assert_raises(RuntimeError, _apply_reference, raw_np, ['EEG 001']) @testing.requires_testing_data def test_set_eeg_reference(): """Test rereference eeg data""" raw = Raw(fif_fname, preload=True) raw.info['projs'] = [] # Test setting an average reference assert_true(not _has_eeg_average_ref_proj(raw.info['projs'])) reref, ref_data = set_eeg_reference(raw) assert_true(_has_eeg_average_ref_proj(reref.info['projs'])) assert_true(ref_data is None) # Test setting an average reference when one was already present reref, ref_data = set_eeg_reference(raw, copy=False) assert_true(ref_data is None) # Rereference raw data by creating a copy of original data reref, ref_data = set_eeg_reference(raw, ['EEG 001', 'EEG 002'], copy=True) assert_true(reref.info['custom_ref_applied']) _test_reference(raw, reref, ref_data, ['EEG 001', 'EEG 002']) # Test that data is modified in place when copy=False reref, ref_data = set_eeg_reference(raw, ['EEG 001', 'EEG 002'], copy=False) assert_true(raw is reref) @testing.requires_testing_data def test_set_bipolar_reference(): """Test bipolar referencing""" raw = Raw(fif_fname, preload=True) reref = set_bipolar_reference(raw, 'EEG 001', 'EEG 002', 'bipolar', {'kind': FIFF.FIFFV_EOG_CH, 'extra': 'some extra value'}) assert_true(reref.info['custom_ref_applied']) # Compare result to a manual calculation a = raw.pick_channels(['EEG 001', 'EEG 002'], copy=True) a = a._data[0, :] - a._data[1, :] b = reref.pick_channels(['bipolar'], copy=True)._data[0, :] assert_allclose(a, b) # Original channels should be replaced by a virtual one assert_true('EEG 001' not in reref.ch_names) assert_true('EEG 002' not in reref.ch_names) assert_true('bipolar' in reref.ch_names) # Check channel information bp_info = reref.info['chs'][reref.ch_names.index('bipolar')] an_info = reref.info['chs'][raw.ch_names.index('EEG 001')] for key in bp_info: if key == 'loc': assert_array_equal(bp_info[key], 0) elif key == 'coil_type': assert_equal(bp_info[key], FIFF.FIFFV_COIL_EEG_BIPOLAR) elif key == 'kind': assert_equal(bp_info[key], FIFF.FIFFV_EOG_CH) else: assert_equal(bp_info[key], an_info[key]) assert_equal(bp_info['extra'], 'some extra value') # Minimalist call reref = set_bipolar_reference(raw, 'EEG 001', 'EEG 002') assert_true('EEG 001-EEG 002' in reref.ch_names) # Test creating a bipolar reference that doesn't involve EEG channels: # it should not set the custom_ref_applied flag reref = set_bipolar_reference(raw, 'MEG 0111', 'MEG 0112', ch_info={'kind': FIFF.FIFFV_MEG_CH}) assert_true(not reref.info['custom_ref_applied']) assert_true('MEG 0111-MEG 0112' in reref.ch_names) # Test a battery of invalid inputs assert_raises(ValueError, set_bipolar_reference, raw, 'EEG 001', ['EEG 002', 'EEG 003'], 'bipolar') assert_raises(ValueError, set_bipolar_reference, raw, ['EEG 001', 'EEG 002'], 'EEG 003', 'bipolar') assert_raises(ValueError, set_bipolar_reference, raw, 'EEG 001', 'EEG 002', ['bipolar1', 'bipolar2']) assert_raises(ValueError, set_bipolar_reference, raw, 'EEG 001', 'EEG 002', 'bipolar', ch_info=[{'foo': 'bar'}, {'foo': 'bar'}]) assert_raises(ValueError, set_bipolar_reference, raw, 'EEG 001', 'EEG 002', ch_name='EEG 003') @testing.requires_testing_data def 
test_add_reference(): raw = Raw(fif_fname, preload=True) picks_eeg = pick_types(raw.info, meg=False, eeg=True) # check if channel already exists assert_raises(ValueError, add_reference_channels, raw, raw.info['ch_names'][0]) # add reference channel to Raw raw_ref = add_reference_channels(raw, 'Ref', copy=True) assert_equal(raw_ref._data.shape[0], raw._data.shape[0] + 1) assert_array_equal(raw._data[picks_eeg, :], raw_ref._data[picks_eeg, :]) orig_nchan = raw.info['nchan'] raw = add_reference_channels(raw, 'Ref', copy=False) assert_array_equal(raw._data, raw_ref._data) assert_equal(raw.info['nchan'], orig_nchan + 1) ref_idx = raw.ch_names.index('Ref') ref_data, _ = raw[ref_idx] assert_array_equal(ref_data, 0) # add two reference channels to Raw raw = Raw(fif_fname, preload=True) picks_eeg = pick_types(raw.info, meg=False, eeg=True) assert_raises(ValueError, add_reference_channels, raw, raw.info['ch_names'][0]) raw_ref = add_reference_channels(raw, ['M1', 'M2'], copy=True) assert_equal(raw_ref._data.shape[0], raw._data.shape[0] + 2) assert_array_equal(raw._data[picks_eeg, :], raw_ref._data[picks_eeg, :]) raw = add_reference_channels(raw, ['M1', 'M2'], copy=False) ref_idx = raw.ch_names.index('M1') ref_idy = raw.ch_names.index('M2') ref_data, _ = raw[[ref_idx, ref_idy]] assert_array_equal(ref_data, 0) # add reference channel to epochs raw = Raw(fif_fname, preload=True) events = read_events(eve_fname) picks_eeg = pick_types(raw.info, meg=False, eeg=True) epochs = Epochs(raw, events=events, event_id=1, tmin=-0.2, tmax=0.5, picks=picks_eeg, preload=True) epochs_ref = add_reference_channels(epochs, 'Ref', copy=True) assert_equal(epochs_ref._data.shape[1], epochs._data.shape[1] + 1) ref_idx = epochs_ref.ch_names.index('Ref') ref_data = epochs_ref.get_data()[:, ref_idx, :] assert_array_equal(ref_data, 0) picks_eeg = pick_types(epochs.info, meg=False, eeg=True) assert_array_equal(epochs.get_data()[:, picks_eeg, :], epochs_ref.get_data()[:, picks_eeg, :]) # add two reference channels to epochs raw = Raw(fif_fname, preload=True) events = read_events(eve_fname) picks_eeg = pick_types(raw.info, meg=False, eeg=True) epochs = Epochs(raw, events=events, event_id=1, tmin=-0.2, tmax=0.5, picks=picks_eeg, preload=True) epochs_ref = add_reference_channels(epochs, ['M1', 'M2'], copy=True) assert_equal(epochs_ref._data.shape[1], epochs._data.shape[1] + 2) ref_idx = epochs_ref.ch_names.index('M1') ref_idy = epochs_ref.ch_names.index('M2') ref_data = epochs_ref.get_data()[:, [ref_idx, ref_idy], :] assert_array_equal(ref_data, 0) picks_eeg = pick_types(epochs.info, meg=False, eeg=True) assert_array_equal(epochs.get_data()[:, picks_eeg, :], epochs_ref.get_data()[:, picks_eeg, :]) # add reference channel to evoked raw = Raw(fif_fname, preload=True) events = read_events(eve_fname) picks_eeg = pick_types(raw.info, meg=False, eeg=True) epochs = Epochs(raw, events=events, event_id=1, tmin=-0.2, tmax=0.5, picks=picks_eeg, preload=True) evoked = epochs.average() evoked_ref = add_reference_channels(evoked, 'Ref', copy=True) assert_equal(evoked_ref.data.shape[0], evoked.data.shape[0] + 1) ref_idx = evoked_ref.ch_names.index('Ref') ref_data = evoked_ref.data[ref_idx, :] assert_array_equal(ref_data, 0) picks_eeg = pick_types(evoked.info, meg=False, eeg=True) assert_array_equal(evoked.data[picks_eeg, :], evoked_ref.data[picks_eeg, :]) # add two reference channels to evoked raw = Raw(fif_fname, preload=True) events = read_events(eve_fname) picks_eeg = pick_types(raw.info, meg=False, eeg=True) epochs = Epochs(raw, events=events, 
event_id=1, tmin=-0.2, tmax=0.5, picks=picks_eeg, preload=True) evoked = epochs.average() evoked_ref = add_reference_channels(evoked, ['M1', 'M2'], copy=True) assert_equal(evoked_ref.data.shape[0], evoked.data.shape[0] + 2) ref_idx = evoked_ref.ch_names.index('M1') ref_idy = evoked_ref.ch_names.index('M2') ref_data = evoked_ref.data[[ref_idx, ref_idy], :] assert_array_equal(ref_data, 0) picks_eeg = pick_types(evoked.info, meg=False, eeg=True) assert_array_equal(evoked.data[picks_eeg, :], evoked_ref.data[picks_eeg, :]) # Test invalid inputs raw_np = Raw(fif_fname, preload=False) assert_raises(RuntimeError, add_reference_channels, raw_np, ['Ref']) assert_raises(ValueError, add_reference_channels, raw, 1)
bsd-3-clause
1,385,957,040,128,003,300
41.42671
79
0.629251
false
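A tiny self-contained illustration of the arithmetic that the _test_reference() helper in the record above verifies: re-referencing subtracts the mean of the chosen reference channels from each EEG channel. It uses plain NumPy with made-up numbers and is only a sketch, not part of the MNE test file.

import numpy as np

data = np.array([[1., 2., 3.],      # EEG 001
                 [3., 2., 1.],      # EEG 002
                 [10., 10., 10.]])  # EEG 003
ref = data[:2].mean(axis=0)         # reference signal = mean of EEG 001 and EEG 002
reref = data - ref                  # subtracted from every EEG channel

print(ref)    # the reference is [2, 2, 2]
print(reref)  # rows become [-1, 0, 1], [1, 0, -1] and [8, 8, 8]; adding ref back recovers data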
dschwoerer/espresso
src/python/espressomd/gen_code_info.py
4
1800
# Copyright (C) 2016 The ESPResSo project # Copyright (C) 2014 Olaf Lenz # # This file is part of ESPResSo. # # ESPResSo is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # ESPResSo is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # # This script generates code_info.pyx # from __future__ import print_function import inspect import sys import os # find featuredefs.py moduledir = os.path.dirname(inspect.getfile(inspect.currentframe())) sys.path.append(os.path.join(moduledir, '..', '..')) import featuredefs if len(sys.argv) != 3: print("Usage: {} DEFFILE OYXFILE".format(sys.argv[0]), file=sys.stderr) exit(2) deffilename, cfilename = sys.argv[1:3] print("Reading definitions from " + deffilename + "...") defs = featuredefs.defs(deffilename) print("Done.") # generate cpp-file print("Writing " + cfilename + "...") cfile = open(cfilename, 'w') cfile.write(""" # This file is autogenerated by gen_code_info.py. # DO NOT EDIT MANUALLY, CHANGES WILL BE LOST include "myconfig.pxi" def features(): \"\"\"Returns list of features compiled into Espresso core\"\"\" f=[] """) template = """ IF {0} == 1: f.append("{0}") """ for feature in defs.allfeatures: cfile.write(template.format(feature)) cfile.write(""" return sorted(f) """) cfile.close() print("Done.")
gpl-3.0
7,520,917,404,588,576
25.086957
75
0.708333
false
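For illustration, the snippet below re-applies the template logic of the generator in the record above to two hypothetical feature names (the real names come from the featuredefs file given on the command line, and the indentation of the generated file is an assumption). It prints roughly the code_info.pyx text the script writes.

features = ["ELECTROSTATICS", "LENNARD_JONES"]   # hypothetical stand-ins for defs.allfeatures

chunks = ['include "myconfig.pxi"\n'
          '\n'
          'def features():\n'
          '    """Returns list of features compiled into Espresso core"""\n'
          '    f=[]\n']
template = '    IF {0} == 1:\n        f.append("{0}")\n'
for feature in features:
    chunks.append(template.format(feature))
chunks.append('    return sorted(f)\n')

print(''.join(chunks))   # roughly the code_info.pyx text that gen_code_info.py emits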
jshiv/turntable
test/lib/python2.7/site-packages/scipy/linalg/decomp_schur.py
9
8380
"""Schur decomposition functions.""" from __future__ import division, print_function, absolute_import import numpy from numpy import asarray_chkfinite, single, asarray from scipy.lib.six import callable # Local imports. from . import misc from .misc import LinAlgError, _datacopied from .lapack import get_lapack_funcs from .decomp import eigvals __all__ = ['schur', 'rsf2csf'] _double_precision = ['i','l','d'] def schur(a, output='real', lwork=None, overwrite_a=False, sort=None, check_finite=True): """ Compute Schur decomposition of a matrix. The Schur decomposition is:: A = Z T Z^H where Z is unitary and T is either upper-triangular, or for real Schur decomposition (output='real'), quasi-upper triangular. In the quasi-triangular form, 2x2 blocks describing complex-valued eigenvalue pairs may extrude from the diagonal. Parameters ---------- a : (M, M) array_like Matrix to decompose output : {'real', 'complex'}, optional Construct the real or complex Schur decomposition (for real matrices). lwork : int, optional Work array size. If None or -1, it is automatically computed. overwrite_a : bool, optional Whether to overwrite data in a (may improve performance). sort : {None, callable, 'lhp', 'rhp', 'iuc', 'ouc'}, optional Specifies whether the upper eigenvalues should be sorted. A callable may be passed that, given a eigenvalue, returns a boolean denoting whether the eigenvalue should be sorted to the top-left (True). Alternatively, string parameters may be used:: 'lhp' Left-hand plane (x.real < 0.0) 'rhp' Right-hand plane (x.real > 0.0) 'iuc' Inside the unit circle (x*x.conjugate() <= 1.0) 'ouc' Outside the unit circle (x*x.conjugate() > 1.0) Defaults to None (no sorting). check_finite : boolean, optional Whether to check that the input matrix contains only finite numbers. Disabling may give a performance gain, but may result in problems (crashes, non-termination) if the inputs do contain infinities or NaNs. Returns ------- T : (M, M) ndarray Schur form of A. It is real-valued for the real Schur decomposition. Z : (M, M) ndarray An unitary Schur transformation matrix for A. It is real-valued for the real Schur decomposition. sdim : int If and only if sorting was requested, a third return value will contain the number of eigenvalues satisfying the sort condition. Raises ------ LinAlgError Error raised under three conditions: 1. The algorithm failed due to a failure of the QR algorithm to compute all eigenvalues 2. If eigenvalue sorting was requested, the eigenvalues could not be reordered due to a failure to separate eigenvalues, usually because of poor conditioning 3. 
If eigenvalue sorting was requested, roundoff errors caused the leading eigenvalues to no longer satisfy the sorting condition See also -------- rsf2csf : Convert real Schur form to complex Schur form """ if output not in ['real','complex','r','c']: raise ValueError("argument must be 'real', or 'complex'") if check_finite: a1 = asarray_chkfinite(a) else: a1 = asarray(a) if len(a1.shape) != 2 or (a1.shape[0] != a1.shape[1]): raise ValueError('expected square matrix') typ = a1.dtype.char if output in ['complex','c'] and typ not in ['F','D']: if typ in _double_precision: a1 = a1.astype('D') typ = 'D' else: a1 = a1.astype('F') typ = 'F' overwrite_a = overwrite_a or (_datacopied(a1, a)) gees, = get_lapack_funcs(('gees',), (a1,)) if lwork is None or lwork == -1: # get optimal work array result = gees(lambda x: None, a1, lwork=-1) lwork = result[-2][0].real.astype(numpy.int) if sort is None: sort_t = 0 sfunction = lambda x: None else: sort_t = 1 if callable(sort): sfunction = sort elif sort == 'lhp': sfunction = lambda x: (numpy.real(x) < 0.0) elif sort == 'rhp': sfunction = lambda x: (numpy.real(x) >= 0.0) elif sort == 'iuc': sfunction = lambda x: (abs(x) <= 1.0) elif sort == 'ouc': sfunction = lambda x: (abs(x) > 1.0) else: raise ValueError("sort parameter must be None, a callable, or " + "one of ('lhp','rhp','iuc','ouc')") result = gees(sfunction, a1, lwork=lwork, overwrite_a=overwrite_a, sort_t=sort_t) info = result[-1] if info < 0: raise ValueError('illegal value in %d-th argument of internal gees' % -info) elif info == a1.shape[0] + 1: raise LinAlgError('Eigenvalues could not be separated for reordering.') elif info == a1.shape[0] + 2: raise LinAlgError('Leading eigenvalues do not satisfy sort condition.') elif info > 0: raise LinAlgError("Schur form not found. Possibly ill-conditioned.") if sort_t == 0: return result[0], result[-3] else: return result[0], result[-3], result[1] eps = numpy.finfo(float).eps feps = numpy.finfo(single).eps _array_kind = {'b':0, 'h':0, 'B': 0, 'i':0, 'l': 0, 'f': 0, 'd': 0, 'F': 1, 'D': 1} _array_precision = {'i': 1, 'l': 1, 'f': 0, 'd': 1, 'F': 0, 'D': 1} _array_type = [['f', 'd'], ['F', 'D']] def _commonType(*arrays): kind = 0 precision = 0 for a in arrays: t = a.dtype.char kind = max(kind, _array_kind[t]) precision = max(precision, _array_precision[t]) return _array_type[kind][precision] def _castCopy(type, *arrays): cast_arrays = () for a in arrays: if a.dtype.char == type: cast_arrays = cast_arrays + (a.copy(),) else: cast_arrays = cast_arrays + (a.astype(type),) if len(cast_arrays) == 1: return cast_arrays[0] else: return cast_arrays def rsf2csf(T, Z, check_finite=True): """ Convert real Schur form to complex Schur form. Convert a quasi-diagonal real-valued Schur form to the upper triangular complex-valued Schur form. Parameters ---------- T : (M, M) array_like Real Schur form of the original matrix Z : (M, M) array_like Schur transformation matrix check_finite : boolean, optional Whether to check that the input matrices contain only finite numbers. Disabling may give a performance gain, but may result in problems (crashes, non-termination) if the inputs do contain infinities or NaNs. 
Returns ------- T : (M, M) ndarray Complex Schur form of the original matrix Z : (M, M) ndarray Schur transformation matrix corresponding to the complex form See also -------- schur : Schur decompose a matrix """ if check_finite: Z, T = map(asarray_chkfinite, (Z, T)) else: Z,T = map(asarray, (Z,T)) if len(Z.shape) != 2 or Z.shape[0] != Z.shape[1]: raise ValueError("matrix must be square.") if len(T.shape) != 2 or T.shape[0] != T.shape[1]: raise ValueError("matrix must be square.") if T.shape[0] != Z.shape[0]: raise ValueError("matrices must be same dimension.") N = T.shape[0] arr = numpy.array t = _commonType(Z, T, arr([3.0],'F')) Z, T = _castCopy(t, Z, T) conj = numpy.conj dot = numpy.dot r_ = numpy.r_ transp = numpy.transpose for m in range(N-1, 0, -1): if abs(T[m,m-1]) > eps*(abs(T[m-1,m-1]) + abs(T[m,m])): k = slice(m-1, m+1) mu = eigvals(T[k,k]) - T[m,m] r = misc.norm([mu[0], T[m,m-1]]) c = mu[0] / r s = T[m,m-1] / r G = r_[arr([[conj(c), s]], dtype=t), arr([[-s, c]], dtype=t)] Gc = conj(transp(G)) j = slice(m-1, N) T[k,j] = dot(G, T[k,j]) i = slice(0, m+1) T[i,k] = dot(T[i,k], Gc) i = slice(0, N) Z[i,k] = dot(Z[i,k], Gc) T[m,m-1] = 0.0 return T, Z
mit
-6,437,494,931,466,885,000
32.927126
83
0.576611
false
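A minimal usage sketch of the two public routines in the record above, assuming a standard NumPy/SciPy installation; the matrix is arbitrary test data.

import numpy as np
from scipy.linalg import schur, rsf2csf

A = np.array([[0., 2., 2.],
              [0., 1., 2.],
              [1., 0., 1.]])

# Real Schur form: A = Z T Z^H with T quasi-upper-triangular.
T, Z = schur(A)
assert np.allclose(Z.dot(T).dot(Z.conj().T), A)

# Convert the quasi-triangular real form into the upper-triangular complex form.
T2, Z2 = rsf2csf(T, Z)
assert np.allclose(Z2.dot(T2).dot(Z2.conj().T), A)

# With a sort condition, schur() also returns how many eigenvalues satisfied it.
T3, Z3, sdim = schur(A, sort='lhp')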
Connexions/rbit
setup.py
1
1039
# -*- coding: utf-8 -*- """ Copyright (C) 2013 Rice University This software is subject to the provisions of the GNU AFFERO GENERAL PUBLIC LICENSE Version 3.0 (AGPL). See LICENSE.txt for details. """ import os from setuptools import setup, find_packages here = os.path.abspath(os.path.dirname(__file__)) README = os.path.join(here, 'README.rst') install_requirements = [ 'pybit', 'pika', 'jsonpickle', 'requests', ] test_requirements = [ 'mock', ] setup( name='rbit', version='1.0', author="Connexions/Rhaptos Team", author_email="[email protected]", description='Rhaptos PyBit client implementation', long_description=open(README).read(), url='https://github.com/connexions/rbit', license='GPL2', # See also LICENSE.txt packages=find_packages(), include_package_data=True, install_requires=install_requirements, extras_require={ 'tests': test_requirements, }, entry_points = """\ [console_scripts] rbit = rbit:main """, )
lgpl-2.1
5,691,639,557,888,541,000
22.088889
105
0.641001
false
fusionpig/ansible
lib/ansible/constants.py
3
16716
# (c) 2012-2014, Michael DeHaan <[email protected]> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # Make coding more python3-ish from __future__ import (absolute_import, division) __metaclass__ = type import os import pwd import sys from string import ascii_letters, digits from six import string_types from six.moves import configparser from ansible.parsing.splitter import unquote from ansible.errors import AnsibleOptionsError # copied from utils, avoid circular reference fun :) def mk_boolean(value): if value is None: return False val = str(value) if val.lower() in [ "true", "t", "y", "1", "yes" ]: return True else: return False def get_config(p, section, key, env_var, default, boolean=False, integer=False, floating=False, islist=False): ''' return a configuration variable with casting ''' value = _get_config(p, section, key, env_var, default) if boolean: value = mk_boolean(value) if value: if integer: value = int(value) elif floating: value = float(value) elif islist: if isinstance(value, string_types): value = [x.strip() for x in value.split(',')] elif isinstance(value, string_types): value = unquote(value) return value def _get_config(p, section, key, env_var, default): ''' helper function for get_config ''' if env_var is not None: value = os.environ.get(env_var, None) if value is not None: return value if p is not None: try: return p.get(section, key, raw=True) except: return default return default def load_config_file(): ''' Load Config File order(first found is used): ENV, CWD, HOME, /etc/ansible ''' p = configparser.ConfigParser() path0 = os.getenv("ANSIBLE_CONFIG", None) if path0 is not None: path0 = os.path.expanduser(path0) if os.path.isdir(path0): path0 += "/ansible.cfg" path1 = os.getcwd() + "/ansible.cfg" path2 = os.path.expanduser("~/.ansible.cfg") path3 = "/etc/ansible/ansible.cfg" for path in [path0, path1, path2, path3]: if path is not None and os.path.exists(path): try: p.read(path) except configparser.Error as e: raise AnsibleOptionsError("Error reading config file: \n{0}".format(e)) return p, path return None, '' def shell_expand_path(path): ''' shell_expand_path is needed as os.path.expanduser does not work when path is None, which is the default for ANSIBLE_PRIVATE_KEY_FILE ''' if path: path = os.path.expanduser(os.path.expandvars(path)) return path p, CONFIG_FILE = load_config_file() active_user = pwd.getpwuid(os.geteuid())[0] # check all of these extensions when looking for yaml files for things like # group variables -- really anything we can load YAML_FILENAME_EXTENSIONS = [ "", ".yml", ".yaml", ".json" ] # sections in config file DEFAULTS='defaults' DEPRECATED_HOST_LIST = shell_expand_path(get_config(p, DEFAULTS, 'hostfile', 'ANSIBLE_HOSTS', '/etc/ansible/hosts')) # generally configurable things DEFAULT_DEBUG = get_config(p, DEFAULTS, 'debug', 'ANSIBLE_DEBUG', False, boolean=True) DEFAULT_HOST_LIST = shell_expand_path(get_config(p, DEFAULTS,'inventory', 
'ANSIBLE_INVENTORY', DEPRECATED_HOST_LIST)) DEFAULT_MODULE_PATH = get_config(p, DEFAULTS, 'library', 'ANSIBLE_LIBRARY', None) DEFAULT_ROLES_PATH = shell_expand_path(get_config(p, DEFAULTS, 'roles_path', 'ANSIBLE_ROLES_PATH', '/etc/ansible/roles')) DEFAULT_REMOTE_TMP = get_config(p, DEFAULTS, 'remote_tmp', 'ANSIBLE_REMOTE_TEMP', '$HOME/.ansible/tmp') DEFAULT_MODULE_NAME = get_config(p, DEFAULTS, 'module_name', None, 'command') DEFAULT_PATTERN = get_config(p, DEFAULTS, 'pattern', None, '*') DEFAULT_FORKS = get_config(p, DEFAULTS, 'forks', 'ANSIBLE_FORKS', 5, integer=True) DEFAULT_MODULE_ARGS = get_config(p, DEFAULTS, 'module_args', 'ANSIBLE_MODULE_ARGS', '') DEFAULT_MODULE_LANG = get_config(p, DEFAULTS, 'module_lang', 'ANSIBLE_MODULE_LANG', 'en_US.UTF-8') DEFAULT_TIMEOUT = get_config(p, DEFAULTS, 'timeout', 'ANSIBLE_TIMEOUT', 10, integer=True) DEFAULT_POLL_INTERVAL = get_config(p, DEFAULTS, 'poll_interval', 'ANSIBLE_POLL_INTERVAL', 15, integer=True) DEFAULT_REMOTE_USER = get_config(p, DEFAULTS, 'remote_user', 'ANSIBLE_REMOTE_USER', active_user) DEFAULT_ASK_PASS = get_config(p, DEFAULTS, 'ask_pass', 'ANSIBLE_ASK_PASS', False, boolean=True) DEFAULT_PRIVATE_KEY_FILE = shell_expand_path(get_config(p, DEFAULTS, 'private_key_file', 'ANSIBLE_PRIVATE_KEY_FILE', None)) DEFAULT_REMOTE_PORT = get_config(p, DEFAULTS, 'remote_port', 'ANSIBLE_REMOTE_PORT', None, integer=True) DEFAULT_ASK_VAULT_PASS = get_config(p, DEFAULTS, 'ask_vault_pass', 'ANSIBLE_ASK_VAULT_PASS', False, boolean=True) DEFAULT_VAULT_PASSWORD_FILE = shell_expand_path(get_config(p, DEFAULTS, 'vault_password_file', 'ANSIBLE_VAULT_PASSWORD_FILE', None)) DEFAULT_TRANSPORT = get_config(p, DEFAULTS, 'transport', 'ANSIBLE_TRANSPORT', 'smart') DEFAULT_SCP_IF_SSH = get_config(p, 'ssh_connection', 'scp_if_ssh', 'ANSIBLE_SCP_IF_SSH', False, boolean=True) DEFAULT_SFTP_BATCH_MODE = get_config(p, 'ssh_connection', 'sftp_batch_mode', 'ANSIBLE_SFTP_BATCH_MODE', True, boolean=True) DEFAULT_MANAGED_STR = get_config(p, DEFAULTS, 'ansible_managed', None, 'Ansible managed: {file} modified on %Y-%m-%d %H:%M:%S by {uid} on {host}') DEFAULT_SYSLOG_FACILITY = get_config(p, DEFAULTS, 'syslog_facility', 'ANSIBLE_SYSLOG_FACILITY', 'LOG_USER') DEFAULT_KEEP_REMOTE_FILES = get_config(p, DEFAULTS, 'keep_remote_files', 'ANSIBLE_KEEP_REMOTE_FILES', False, boolean=True) DEFAULT_HASH_BEHAVIOUR = get_config(p, DEFAULTS, 'hash_behaviour', 'ANSIBLE_HASH_BEHAVIOUR', 'replace') DEFAULT_PRIVATE_ROLE_VARS = get_config(p, DEFAULTS, 'private_role_vars', 'ANSIBLE_PRIVATE_ROLE_VARS', False, boolean=True) DEFAULT_JINJA2_EXTENSIONS = get_config(p, DEFAULTS, 'jinja2_extensions', 'ANSIBLE_JINJA2_EXTENSIONS', None) DEFAULT_EXECUTABLE = get_config(p, DEFAULTS, 'executable', 'ANSIBLE_EXECUTABLE', '/bin/sh') DEFAULT_GATHERING = get_config(p, DEFAULTS, 'gathering', 'ANSIBLE_GATHERING', 'implicit').lower() DEFAULT_LOG_PATH = shell_expand_path(get_config(p, DEFAULTS, 'log_path', 'ANSIBLE_LOG_PATH', '')) DEFAULT_FORCE_HANDLERS = get_config(p, DEFAULTS, 'force_handlers', 'ANSIBLE_FORCE_HANDLERS', False, boolean=True) DEFAULT_INVENTORY_IGNORE = get_config(p, DEFAULTS, 'inventory_ignore_extensions', 'ANSIBLE_INVENTORY_IGNORE', ["~", ".orig", ".bak", ".ini", ".cfg", ".retry", ".pyc", ".pyo"], islist=True) # selinux DEFAULT_SELINUX_SPECIAL_FS = get_config(p, 'selinux', 'special_context_filesystems', None, 'fuse, nfs, vboxsf, ramfs', islist=True) ### PRIVILEGE ESCALATION ### # Backwards Compat DEFAULT_SU = get_config(p, DEFAULTS, 'su', 'ANSIBLE_SU', False, boolean=True) DEFAULT_SU_USER = get_config(p, 
DEFAULTS, 'su_user', 'ANSIBLE_SU_USER', 'root') DEFAULT_SU_EXE = get_config(p, DEFAULTS, 'su_exe', 'ANSIBLE_SU_EXE', 'su') DEFAULT_SU_FLAGS = get_config(p, DEFAULTS, 'su_flags', 'ANSIBLE_SU_FLAGS', '') DEFAULT_ASK_SU_PASS = get_config(p, DEFAULTS, 'ask_su_pass', 'ANSIBLE_ASK_SU_PASS', False, boolean=True) DEFAULT_SUDO = get_config(p, DEFAULTS, 'sudo', 'ANSIBLE_SUDO', False, boolean=True) DEFAULT_SUDO_USER = get_config(p, DEFAULTS, 'sudo_user', 'ANSIBLE_SUDO_USER', 'root') DEFAULT_SUDO_EXE = get_config(p, DEFAULTS, 'sudo_exe', 'ANSIBLE_SUDO_EXE', 'sudo') DEFAULT_SUDO_FLAGS = get_config(p, DEFAULTS, 'sudo_flags', 'ANSIBLE_SUDO_FLAGS', '-H') DEFAULT_ASK_SUDO_PASS = get_config(p, DEFAULTS, 'ask_sudo_pass', 'ANSIBLE_ASK_SUDO_PASS', False, boolean=True) # Become BECOME_ERROR_STRINGS = {'sudo': 'Sorry, try again.', 'su': 'Authentication failure', 'pbrun': '', 'pfexec': '', 'runas': '', 'doas': 'Permission denied'} #FIXME: deal with i18n BECOME_METHODS = ['sudo','su','pbrun','pfexec','runas','doas'] DEFAULT_BECOME_METHOD = get_config(p, 'privilege_escalation', 'become_method', 'ANSIBLE_BECOME_METHOD','sudo' if DEFAULT_SUDO else 'su' if DEFAULT_SU else 'sudo' ).lower() DEFAULT_BECOME = get_config(p, 'privilege_escalation', 'become', 'ANSIBLE_BECOME',False, boolean=True) DEFAULT_BECOME_USER = get_config(p, 'privilege_escalation', 'become_user', 'ANSIBLE_BECOME_USER', 'root') DEFAULT_BECOME_EXE = get_config(p, 'privilege_escalation', 'become_exe', 'ANSIBLE_BECOME_EXE', None) DEFAULT_BECOME_FLAGS = get_config(p, 'privilege_escalation', 'become_flags', 'ANSIBLE_BECOME_FLAGS', None) DEFAULT_BECOME_ASK_PASS = get_config(p, 'privilege_escalation', 'become_ask_pass', 'ANSIBLE_BECOME_ASK_PASS', False, boolean=True) # Plugin paths DEFAULT_ACTION_PLUGIN_PATH = get_config(p, DEFAULTS, 'action_plugins', 'ANSIBLE_ACTION_PLUGINS', '~/.ansible/plugins/action_plugins:/usr/share/ansible_plugins/action_plugins') DEFAULT_CACHE_PLUGIN_PATH = get_config(p, DEFAULTS, 'cache_plugins', 'ANSIBLE_CACHE_PLUGINS', '~/.ansible/plugins/cache_plugins:/usr/share/ansible_plugins/cache_plugins') DEFAULT_CALLBACK_PLUGIN_PATH = get_config(p, DEFAULTS, 'callback_plugins', 'ANSIBLE_CALLBACK_PLUGINS', '~/.ansible/plugins/callback_plugins:/usr/share/ansible_plugins/callback_plugins') DEFAULT_CONNECTION_PLUGIN_PATH = get_config(p, DEFAULTS, 'connection_plugins', 'ANSIBLE_CONNECTION_PLUGINS', '~/.ansible/plugins/connection_plugins:/usr/share/ansible_plugins/connection_plugins') DEFAULT_LOOKUP_PLUGIN_PATH = get_config(p, DEFAULTS, 'lookup_plugins', 'ANSIBLE_LOOKUP_PLUGINS', '~/.ansible/plugins/lookup_plugins:/usr/share/ansible_plugins/lookup_plugins') DEFAULT_VARS_PLUGIN_PATH = get_config(p, DEFAULTS, 'vars_plugins', 'ANSIBLE_VARS_PLUGINS', '~/.ansible/plugins/vars_plugins:/usr/share/ansible_plugins/vars_plugins') DEFAULT_FILTER_PLUGIN_PATH = get_config(p, DEFAULTS, 'filter_plugins', 'ANSIBLE_FILTER_PLUGINS', '~/.ansible/plugins/filter_plugins:/usr/share/ansible_plugins/filter_plugins') DEFAULT_TEST_PLUGIN_PATH = get_config(p, DEFAULTS, 'test_plugins', 'ANSIBLE_TEST_PLUGINS', '~/.ansible/plugins/test_plugins:/usr/share/ansible_plugins/test_plugins') DEFAULT_STDOUT_CALLBACK = get_config(p, DEFAULTS, 'stdout_callback', 'ANSIBLE_STDOUT_CALLBACK', 'default') CACHE_PLUGIN = get_config(p, DEFAULTS, 'fact_caching', 'ANSIBLE_CACHE_PLUGIN', 'memory') CACHE_PLUGIN_CONNECTION = get_config(p, DEFAULTS, 'fact_caching_connection', 'ANSIBLE_CACHE_PLUGIN_CONNECTION', None) CACHE_PLUGIN_PREFIX = get_config(p, DEFAULTS, 'fact_caching_prefix', 
'ANSIBLE_CACHE_PLUGIN_PREFIX', 'ansible_facts') CACHE_PLUGIN_TIMEOUT = get_config(p, DEFAULTS, 'fact_caching_timeout', 'ANSIBLE_CACHE_PLUGIN_TIMEOUT', 24 * 60 * 60, integer=True) # Display ANSIBLE_FORCE_COLOR = get_config(p, DEFAULTS, 'force_color', 'ANSIBLE_FORCE_COLOR', None, boolean=True) ANSIBLE_NOCOLOR = get_config(p, DEFAULTS, 'nocolor', 'ANSIBLE_NOCOLOR', None, boolean=True) ANSIBLE_NOCOWS = get_config(p, DEFAULTS, 'nocows', 'ANSIBLE_NOCOWS', None, boolean=True) DISPLAY_SKIPPED_HOSTS = get_config(p, DEFAULTS, 'display_skipped_hosts', 'DISPLAY_SKIPPED_HOSTS', True, boolean=True) DEFAULT_UNDEFINED_VAR_BEHAVIOR = get_config(p, DEFAULTS, 'error_on_undefined_vars', 'ANSIBLE_ERROR_ON_UNDEFINED_VARS', True, boolean=True) HOST_KEY_CHECKING = get_config(p, DEFAULTS, 'host_key_checking', 'ANSIBLE_HOST_KEY_CHECKING', True, boolean=True) SYSTEM_WARNINGS = get_config(p, DEFAULTS, 'system_warnings', 'ANSIBLE_SYSTEM_WARNINGS', True, boolean=True) DEPRECATION_WARNINGS = get_config(p, DEFAULTS, 'deprecation_warnings', 'ANSIBLE_DEPRECATION_WARNINGS', True, boolean=True) DEFAULT_CALLABLE_WHITELIST = get_config(p, DEFAULTS, 'callable_whitelist', 'ANSIBLE_CALLABLE_WHITELIST', [], islist=True) COMMAND_WARNINGS = get_config(p, DEFAULTS, 'command_warnings', 'ANSIBLE_COMMAND_WARNINGS', True, boolean=True) DEFAULT_LOAD_CALLBACK_PLUGINS = get_config(p, DEFAULTS, 'bin_ansible_callbacks', 'ANSIBLE_LOAD_CALLBACK_PLUGINS', False, boolean=True) DEFAULT_CALLBACK_WHITELIST = get_config(p, DEFAULTS, 'callback_whitelist', 'ANSIBLE_CALLBACK_WHITELIST', [], islist=True) RETRY_FILES_ENABLED = get_config(p, DEFAULTS, 'retry_files_enabled', 'ANSIBLE_RETRY_FILES_ENABLED', True, boolean=True) RETRY_FILES_SAVE_PATH = get_config(p, DEFAULTS, 'retry_files_save_path', 'ANSIBLE_RETRY_FILES_SAVE_PATH', '~/') # CONNECTION RELATED ANSIBLE_SSH_ARGS = get_config(p, 'ssh_connection', 'ssh_args', 'ANSIBLE_SSH_ARGS', None) ANSIBLE_SSH_CONTROL_PATH = get_config(p, 'ssh_connection', 'control_path', 'ANSIBLE_SSH_CONTROL_PATH', "%(directory)s/ansible-ssh-%%h-%%p-%%r") ANSIBLE_SSH_PIPELINING = get_config(p, 'ssh_connection', 'pipelining', 'ANSIBLE_SSH_PIPELINING', False, boolean=True) ANSIBLE_SSH_RETRIES = get_config(p, 'ssh_connection', 'retries', 'ANSIBLE_SSH_RETRIES', 0, integer=True) PARAMIKO_RECORD_HOST_KEYS = get_config(p, 'paramiko_connection', 'record_host_keys', 'ANSIBLE_PARAMIKO_RECORD_HOST_KEYS', True, boolean=True) # obsolete -- will be formally removed ZEROMQ_PORT = get_config(p, 'fireball_connection', 'zeromq_port', 'ANSIBLE_ZEROMQ_PORT', 5099, integer=True) ACCELERATE_PORT = get_config(p, 'accelerate', 'accelerate_port', 'ACCELERATE_PORT', 5099, integer=True) ACCELERATE_TIMEOUT = get_config(p, 'accelerate', 'accelerate_timeout', 'ACCELERATE_TIMEOUT', 30, integer=True) ACCELERATE_CONNECT_TIMEOUT = get_config(p, 'accelerate', 'accelerate_connect_timeout', 'ACCELERATE_CONNECT_TIMEOUT', 1.0, floating=True) ACCELERATE_DAEMON_TIMEOUT = get_config(p, 'accelerate', 'accelerate_daemon_timeout', 'ACCELERATE_DAEMON_TIMEOUT', 30, integer=True) ACCELERATE_KEYS_DIR = get_config(p, 'accelerate', 'accelerate_keys_dir', 'ACCELERATE_KEYS_DIR', '~/.fireball.keys') ACCELERATE_KEYS_DIR_PERMS = get_config(p, 'accelerate', 'accelerate_keys_dir_perms', 'ACCELERATE_KEYS_DIR_PERMS', '700') ACCELERATE_KEYS_FILE_PERMS = get_config(p, 'accelerate', 'accelerate_keys_file_perms', 'ACCELERATE_KEYS_FILE_PERMS', '600') ACCELERATE_MULTI_KEY = get_config(p, 'accelerate', 'accelerate_multi_key', 'ACCELERATE_MULTI_KEY', False, boolean=True) PARAMIKO_PTY = 
get_config(p, 'paramiko_connection', 'pty', 'ANSIBLE_PARAMIKO_PTY', True, boolean=True) # galaxy related DEFAULT_GALAXY_URI = get_config(p, 'galaxy', 'server_uri', 'ANSIBLE_GALAXY_SERVER_URI', 'https://galaxy.ansible.com') # this can be configured to blacklist SCMS but cannot add new ones unless the code is also updated GALAXY_SCMS = get_config(p, 'galaxy', 'scms', 'ANSIBLE_GALAXY_SCMS', 'git, hg', islist=True) # characters included in auto-generated passwords DEFAULT_PASSWORD_CHARS = ascii_letters + digits + ".,:-_" # non-configurable things MODULE_REQUIRE_ARGS = ['command', 'shell', 'raw', 'script'] MODULE_NO_JSON = ['command', 'shell', 'raw'] DEFAULT_BECOME_PASS = None DEFAULT_SUDO_PASS = None DEFAULT_REMOTE_PASS = None DEFAULT_SUBSET = None DEFAULT_SU_PASS = None VAULT_VERSION_MIN = 1.0 VAULT_VERSION_MAX = 1.0 MAX_FILE_SIZE_FOR_DIFF = 1*1024*1024 TREE_DIR = None LOCALHOST = frozenset(['127.0.0.1', 'localhost', '::1'])
gpl-3.0
7,276,075,401,001,199,000
66.403226
195
0.665769
false
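To make the lookup order in the record above concrete, the sketch below mirrors _get_config() against an in-memory config: an environment variable wins over the config file, which wins over the default. The Python 3 configparser API is used for brevity even though the record itself targets Python 2.

import configparser
import os

p = configparser.ConfigParser()
p.read_string("[defaults]\nforks = 10\n")   # stands in for a parsed ansible.cfg

def _get_config(p, section, key, env_var, default):
    # Same precedence as in the record: environment, then config file, then default.
    if env_var is not None:
        value = os.environ.get(env_var)
        if value is not None:
            return value
    if p is not None:
        try:
            return p.get(section, key, raw=True)
        except Exception:
            return default
    return default

print(_get_config(p, 'defaults', 'forks', 'ANSIBLE_FORKS', 5))      # '10'  (config file)
os.environ['ANSIBLE_FORKS'] = '20'
print(_get_config(p, 'defaults', 'forks', 'ANSIBLE_FORKS', 5))      # '20'  (environment wins)
print(_get_config(p, 'defaults', 'missing', 'ANSIBLE_MISSING', 5))  # 5     (default)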
zhoucen/gunicorn
setup.py
10
3070
# -*- coding: utf-8 - # # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. import os import sys from setuptools import setup, find_packages from setuptools.command.test import test as TestCommand from gunicorn import __version__ CLASSIFIERS = [ 'Development Status :: 4 - Beta', 'Environment :: Other Environment', 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Operating System :: MacOS :: MacOS X', 'Operating System :: POSIX', 'Programming Language :: Python', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.6', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.2', 'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4', 'Topic :: Internet', 'Topic :: Utilities', 'Topic :: Software Development :: Libraries :: Python Modules', 'Topic :: Internet :: WWW/HTTP', 'Topic :: Internet :: WWW/HTTP :: WSGI', 'Topic :: Internet :: WWW/HTTP :: WSGI :: Server', 'Topic :: Internet :: WWW/HTTP :: Dynamic Content'] # read long description with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as f: long_description = f.read() # read dev requirements fname = os.path.join(os.path.dirname(__file__), 'requirements_test.txt') with open(fname) as f: tests_require = [l.strip() for l in f.readlines()] if sys.version_info[:2] < (3, 3): # recent versions of mock doesn't support Python 2.6 if sys.version_info[:2] == (2, 6): tests_require.append('mock==1.0.1') else: tests_require.append('mock') if sys.version_info[:2] < (2, 7): tests_require.append('unittest2') class PyTestCommand(TestCommand): user_options = [ ("cov", None, "measure coverage") ] def initialize_options(self): TestCommand.initialize_options(self) self.cov = None def finalize_options(self): TestCommand.finalize_options(self) self.test_args = ['tests'] if self.cov: self.test_args += ['--cov', 'gunicorn'] self.test_suite = True def run_tests(self): import pytest errno = pytest.main(self.test_args) sys.exit(errno) setup( name='gunicorn', version=__version__, description='WSGI HTTP Server for UNIX', long_description=long_description, author='Benoit Chesneau', author_email='[email protected]', license='MIT', url='http://gunicorn.org', classifiers=CLASSIFIERS, zip_safe=False, packages=find_packages(exclude=['examples', 'tests']), include_package_data=True, tests_require=tests_require, cmdclass={'test': PyTestCommand}, entry_points=""" [console_scripts] gunicorn=gunicorn.app.wsgiapp:run gunicorn_django=gunicorn.app.djangoapp:run gunicorn_paster=gunicorn.app.pasterapp:run [paste.server_runner] main=gunicorn.app.pasterapp:paste_server """ )
mit
8,594,250,494,716,624,000
28.238095
72
0.638111
false
deKupini/erp
addons/website/models/ir_http.py
3
13266
# -*- coding: utf-8 -*- import datetime import hashlib import logging import os import re import traceback import werkzeug import werkzeug.routing import werkzeug.utils import openerp from openerp.addons.base import ir from openerp.addons.base.ir import ir_qweb from openerp.addons.website.models.website import slug, url_for, _UNSLUG_RE from openerp.http import request from openerp.tools import config from openerp.osv import orm logger = logging.getLogger(__name__) class RequestUID(object): def __init__(self, **kw): self.__dict__.update(kw) class ir_http(orm.AbstractModel): _inherit = 'ir.http' rerouting_limit = 10 geo_ip_resolver = None def _get_converters(self): return dict( super(ir_http, self)._get_converters(), model=ModelConverter, page=PageConverter, ) def _auth_method_public(self): if not request.session.uid: domain_name = request.httprequest.environ.get('HTTP_HOST', '').split(':')[0] website_id = self.pool['website']._get_current_website_id(request.cr, openerp.SUPERUSER_ID, domain_name, context=request.context) if website_id: request.uid = self.pool['website'].browse(request.cr, openerp.SUPERUSER_ID, website_id, request.context).user_id.id else: request.uid = self.pool['ir.model.data'].xmlid_to_res_id(request.cr, openerp.SUPERUSER_ID, 'base', 'public_user') else: request.uid = request.session.uid def _dispatch(self): first_pass = not hasattr(request, 'website') request.website = None func = None try: func, arguments = self._find_handler() request.website_enabled = func.routing.get('website', False) except werkzeug.exceptions.NotFound: # either we have a language prefixed route, either a real 404 # in all cases, website processes them request.website_enabled = True request.website_multilang = request.website_enabled and func and func.routing.get('multilang', True) if 'geoip' not in request.session: record = {} if self.geo_ip_resolver is None: try: import GeoIP # updated database can be downloaded on MaxMind website # http://dev.maxmind.com/geoip/legacy/install/city/ geofile = config.get('geoip_database') if os.path.exists(geofile): self.geo_ip_resolver = GeoIP.open(geofile, GeoIP.GEOIP_STANDARD) else: self.geo_ip_resolver = False logger.warning('GeoIP database file %r does not exists', geofile) except ImportError: self.geo_ip_resolver = False if self.geo_ip_resolver and request.httprequest.remote_addr: record = self.geo_ip_resolver.record_by_addr(request.httprequest.remote_addr) or {} request.session['geoip'] = record if request.website_enabled: try: if func: self._authenticate(func.routing['auth']) else: self._auth_method_public() except Exception as e: return self._handle_exception(e) request.redirect = lambda url, code=302: werkzeug.utils.redirect(url_for(url), code) request.website = request.registry['website'].get_current_website(request.cr, request.uid, context=request.context) request.context['website_id'] = request.website.id langs = [lg[0] for lg in request.website.get_languages()] path = request.httprequest.path.split('/') if first_pass: if request.website_multilang: # If the url doesn't contains the lang and that it's the first connection, we to retreive the user preference if it exists. if not path[1] in langs and not request.httprequest.cookies.get('session_id'): if request.lang not in langs: # Try to find a similar lang. 
Eg: fr_BE and fr_FR short = request.lang.split('_')[0] langs_withshort = [lg[0] for lg in request.website.get_languages() if lg[0].startswith(short)] if len(langs_withshort): request.lang = langs_withshort[0] else: request.lang = request.website.default_lang_code # We redirect with the right language in url if request.lang != request.website.default_lang_code: path.insert(1, request.lang) path = '/'.join(path) or '/' return request.redirect(path + '?' + request.httprequest.query_string) else: request.lang = request.website.default_lang_code request.context['lang'] = request.lang if not request.context.get('tz'): request.context['tz'] = request.session['geoip'].get('time_zone') if not func: if path[1] in langs: request.lang = request.context['lang'] = path.pop(1) path = '/'.join(path) or '/' if request.lang == request.website.default_lang_code: # If language is in the url and it is the default language, redirect # to url without language so google doesn't see duplicate content return request.redirect(path + '?' + request.httprequest.query_string, code=301) return self.reroute(path) # bind modified context request.website = request.website.with_context(request.context) return super(ir_http, self)._dispatch() def reroute(self, path): if not hasattr(request, 'rerouting'): request.rerouting = [request.httprequest.path] if path in request.rerouting: raise Exception("Rerouting loop is forbidden") request.rerouting.append(path) if len(request.rerouting) > self.rerouting_limit: raise Exception("Rerouting limit exceeded") request.httprequest.environ['PATH_INFO'] = path # void werkzeug cached_property. TODO: find a proper way to do this for key in ('path', 'full_path', 'url', 'base_url'): request.httprequest.__dict__.pop(key, None) return self._dispatch() def _postprocess_args(self, arguments, rule): super(ir_http, self)._postprocess_args(arguments, rule) for key, val in arguments.items(): # Replace uid placeholder by the current request.uid if isinstance(val, orm.BaseModel) and isinstance(val._uid, RequestUID): arguments[key] = val.sudo(request.uid) try: _, path = rule.build(arguments) assert path is not None except Exception, e: return self._handle_exception(e, code=404) if getattr(request, 'website_multilang', False) and request.httprequest.method in ('GET', 'HEAD'): generated_path = werkzeug.url_unquote_plus(path) current_path = werkzeug.url_unquote_plus(request.httprequest.path) if generated_path != current_path: if request.lang != request.website.default_lang_code: path = '/' + request.lang + path if request.httprequest.query_string: path += '?' 
+ request.httprequest.query_string return werkzeug.utils.redirect(path, code=301) def _handle_exception(self, exception, code=500): is_website_request = bool(getattr(request, 'website_enabled', False) and request.website) if not is_website_request: # Don't touch non website requests exception handling return super(ir_http, self)._handle_exception(exception) else: try: response = super(ir_http, self)._handle_exception(exception) if isinstance(response, Exception): exception = response else: # if parent excplicitely returns a plain response, then we don't touch it return response except Exception, e: if openerp.tools.config['dev_mode'] and (not isinstance(exception, ir_qweb.QWebException) or not exception.qweb.get('cause')): raise exception = e values = dict( exception=exception, traceback=traceback.format_exc(exception), ) if isinstance(exception, werkzeug.exceptions.HTTPException): if exception.code is None: # Hand-crafted HTTPException likely coming from abort(), # usually for a redirect response -> return it directly return exception else: code = exception.code if isinstance(exception, openerp.exceptions.AccessError): code = 403 if isinstance(exception, ir_qweb.QWebException): values.update(qweb_exception=exception) if isinstance(exception.qweb.get('cause'), openerp.exceptions.AccessError): code = 403 if code == 500: logger.error("500 Internal Server Error:\n\n%s", values['traceback']) if 'qweb_exception' in values: view = request.registry.get("ir.ui.view") views = view._views_get(request.cr, request.uid, exception.qweb['template'], request.context) to_reset = [v for v in views if v.model_data_id.noupdate is True and not v.page] values['views'] = to_reset elif code == 403: logger.warn("403 Forbidden:\n\n%s", values['traceback']) values.update( status_message=werkzeug.http.HTTP_STATUS_CODES[code], status_code=code, ) if not request.uid: self._auth_method_public() try: html = request.website._render('website.%s' % code, values) except Exception: html = request.website._render('website.http_error', values) return werkzeug.wrappers.Response(html, status=code, content_type='text/html;charset=utf-8') class ModelConverter(ir.ir_http.ModelConverter): def __init__(self, url_map, model=False, domain='[]'): super(ModelConverter, self).__init__(url_map, model) self.domain = domain self.regex = _UNSLUG_RE.pattern def to_url(self, value): return slug(value) def to_python(self, value): m = re.match(self.regex, value) _uid = RequestUID(value=value, match=m, converter=self) record_id = int(m.group(2)) if record_id < 0: # limited support for negative IDs due to our slug pattern, assume abs() if not found if not request.registry[self.model].exists(request.cr, _uid, [record_id]): record_id = abs(record_id) return request.registry[self.model].browse( request.cr, _uid, record_id, context=request.context) def generate(self, cr, uid, query=None, args=None, context=None): obj = request.registry[self.model] domain = eval( self.domain, (args or {}).copy()) if query: domain.append((obj._rec_name, 'ilike', '%'+query+'%')) for record in obj.search_read(cr, uid, domain=domain, fields=['write_date',obj._rec_name], context=context): if record.get(obj._rec_name, False): yield {'loc': (record['id'], record[obj._rec_name])} class PageConverter(werkzeug.routing.PathConverter): """ Only point of this converter is to bundle pages enumeration logic """ def generate(self, cr, uid, query=None, args={}, context=None): View = request.registry['ir.ui.view'] domain = [('page', '=', True)] query = query and 
query.startswith('website.') and query[8:] or query if query: domain += [('key', 'like', query)] views = View.search_read(cr, uid, domain, fields=['key', 'priority', 'write_date'], order='name', context=context) for view in views: xid = view['key'].startswith('website.') and view['key'][8:] or view['key'] # the 'page/homepage' url is indexed as '/', avoid aving the same page referenced twice # when we will have an url mapping mechanism, replace this by a rule: page/homepage --> / if xid=='homepage': continue record = {'loc': xid} if view['priority'] <> 16: record['__priority'] = min(round(view['priority'] / 32.0,1), 1) if view['write_date']: record['__lastmod'] = view['write_date'][:10] yield record
agpl-3.0
8,553,444,281,619,699,000
45.384615
143
0.57131
false
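The ModelConverter in the record above relies on Odoo's slug() helper and _UNSLUG_RE pattern, which are defined elsewhere in the website module. The stand-ins below are a simplified, assumed version of that round-trip (record name plus id in the URL, id recovered on the way back); they are for readability only and are not Odoo's actual implementation.

import re

def slug(name, record_id):
    # "My Product Page", 42 -> "my-product-page-42"
    base = re.sub(r'\W+', '-', name.strip().lower()).strip('-')
    return "%s-%d" % (base, record_id)

UNSLUG = re.compile(r'(?:(.+?)-)?(-?\d+)$')

def unslug(value):
    # "my-product-page-42" -> ('my-product-page', 42); the id is what to_python() needs
    m = UNSLUG.match(value)
    if not m:
        return None, None
    return m.group(1), int(m.group(2))

print(slug("My Product Page", 42))   # my-product-page-42
print(unslug("my-product-page-42"))  # ('my-product-page', 42)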
hpi-xnor/BMXNet
example/speech-demo/io_func/utils.py
25
5707
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import sys, subprocess, pickle, os, json, logging, socket import logging.config import datetime from . import info def getRunDir(): return os.path.dirname(os.path.realpath(sys.argv[0])) def setup_logger(logging_ini): if logging_ini is not None: print("Using custom logger") else: logging_ini = os.path.join(info.CONFIGS, 'logging.ini') logging.config.fileConfig(logging_ini) logger = logging.getLogger(__name__) logger.info("**************************************************") logger.info(datetime.datetime.now().strftime("%Y-%m-%d %H:%M")) logger.info("Host: " + str(socket.gethostname())) logger.info("Screen: " + os.getenv("STY", "unknown")) logger.info("PWD: " + os.getenv("PWD", "unknown")) logger.info("Cmd: " + str(sys.argv)) logger.info("**************************************************") def to_bool(obj): if str(obj).lower() in ["true", "1"]: return True elif str(obj).lower() in ["false", "0"]: return False else: raise Exception("to_bool: cannot convert to bool") def line_with_arg(line): line = line.strip() return line is not "" and not line.startswith("#") def parse_conv_spec(conv_spec, batch_size): # "1x29x29:100,5x5,p2x2:200,4x4,p2x2,f" conv_spec = conv_spec.replace('X', 'x') structure = conv_spec.split(':') conv_layer_configs = [] for i in range(1, len(structure)): config = {} elements = structure[i].split(',') if i == 1: input_dims = structure[i - 1].split('x') prev_map_number = int(input_dims[0]) prev_feat_dim_x = int(input_dims[1]) prev_feat_dim_y = int(input_dims[2]) else: prev_map_number = conv_layer_configs[-1]['output_shape'][1] prev_feat_dim_x = conv_layer_configs[-1]['output_shape'][2] prev_feat_dim_y = conv_layer_configs[-1]['output_shape'][3] current_map_number = int(elements[0]) filter_xy = elements[1].split('x') filter_size_x = int(filter_xy[0]) filter_size_y = int(filter_xy[1]) pool_xy = elements[2].replace('p','').replace('P','').split('x') pool_size_x = int(pool_xy[0]) pool_size_y = int(pool_xy[1]) output_dim_x = (prev_feat_dim_x - filter_size_x + 1) / pool_size_x output_dim_y = (prev_feat_dim_y - filter_size_y + 1) / pool_size_y config['input_shape'] = (batch_size, prev_map_number, prev_feat_dim_x, prev_feat_dim_y) config['filter_shape'] = (current_map_number, prev_map_number, filter_size_x, filter_size_y) config['poolsize'] = (pool_size_x, pool_size_y) config['output_shape'] = (batch_size, current_map_number, output_dim_x, output_dim_y) if len(elements) == 4 and elements[3] == 'f': config['flatten'] = True else: config['flatten'] = False conv_layer_configs.append(config) return conv_layer_configs def _relu(x): return x * (x > 0) def _capped_relu(x): return T.minimum(x * (x > 0), 6) def _linear(x): return x * 1.0 def parse_activation(act_str): print("***", act_str) if act_str == 'sigmoid': return 
T.nnet.sigmoid elif act_str == 'tanh': return T.tanh elif act_str == 'relu': return _relu elif act_str == 'capped_relu': return _capped_relu elif act_str == 'linear': return _linear return T.nnet.sigmoid def activation_to_txt(act_func): if act_func == T.nnet.sigmoid: return 'sigmoid' if act_func == T.tanh: return 'tanh' def parse_two_integers(argument_str): elements = argument_str.split(":") int_strs = elements[1].split(",") return int(int_strs[0]), int(int_strs[1]) """ Usage: command = 'mysqladmin create test -uroot -pmysqladmin12' for line in run_command(command): print(line) """ def run_command(command): fnull = open(os.devnull, 'w') p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=fnull, shell=True) return p, iter(p.stdout.readline, b'') def pickle_load(filename): f = open(filename, "rb") try: obj = pickle.load(f) except Exception: f.close() f = open(filename, "rb") print("Not a pickled file... try to load as text format: " + filename) obj = json.load(f) f.close() return obj def pickle_save(obj, filename): f = open(filename + ".new", "wb") pickle.dump(obj, f) f.close() os.rename(filename + ".new", filename) def makedirs(path): if not os.path.exists(path): os.makedirs(path) def kahan_add(total, carry, inc): cs = T.add_no_assoc(carry, inc) s = T.add_no_assoc(total, cs) update_carry = T.sub(cs, T.sub(s, total)) update_total = s return update_total, update_carry
apache-2.0
-6,621,914,986,255,953,000
32.570588
100
0.59611
false
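The kahan_add() helper at the end of the record above is written against a Theano-style symbolic API `T` that this file never imports. The plain-Python sketch below shows the compensated (Kahan) summation it expresses; it illustrates the intended arithmetic and is not a drop-in replacement for the symbolic version.

import math

def kahan_add(total, carry, inc):
    cs = carry + inc           # fold the running compensation into the increment
    s = total + cs             # candidate new total (may lose low-order bits)
    carry = cs - (s - total)   # whatever was lost becomes the new compensation
    return s, carry

xs = [0.1] * 1000
naive = 0.0
total, carry = 0.0, 0.0
for x in xs:
    naive += x
    total, carry = kahan_add(total, carry, x)

print(naive)          # drifts away from the exact sum as rounding errors accumulate
print(total + carry)  # stays much closer to the correctly rounded sum
print(math.fsum(xs))  # correctly rounded reference value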
lagopus/ryu-lagopus-ext
ryu/services/protocols/bgp/info_base/base.py
4
43319
# Copyright (C) 2014 Nippon Telegraph and Telephone Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """ Defines some model classes related BGP. These class include types used in saving information sent/received over BGP sessions. """ import abc from abc import ABCMeta from abc import abstractmethod from copy import copy import logging import functools import netaddr import six from ryu.lib.packet.bgp import RF_IPv4_UC from ryu.lib.packet.bgp import RouteTargetMembershipNLRI from ryu.lib.packet.bgp import BGP_ATTR_TYPE_EXTENDED_COMMUNITIES from ryu.lib.packet.bgp import BGPPathAttributeLocalPref from ryu.lib.packet.bgp import BGP_ATTR_TYPE_AS_PATH from ryu.services.protocols.bgp.base import OrderedDict from ryu.services.protocols.bgp.constants import VPN_TABLE from ryu.services.protocols.bgp.constants import VRF_TABLE from ryu.services.protocols.bgp.model import OutgoingRoute from ryu.services.protocols.bgp.processor import BPR_ONLY_PATH from ryu.services.protocols.bgp.processor import BPR_UNKNOWN LOG = logging.getLogger('bgpspeaker.info_base.base') @six.add_metaclass(ABCMeta) class Table(object): """A container for holding information about destination/prefixes. Routing information base for a particular afi/safi. This is a base class which should be sub-classed for different route family. A table can be uniquely identified by (Route Family, Scope Id). """ ROUTE_FAMILY = RF_IPv4_UC def __init__(self, scope_id, core_service, signal_bus): self._destinations = dict() # Scope in which this table exists. # If this table represents the VRF, then this could be a VPN ID. # For global/VPN tables this should be None self._scope_id = scope_id self._signal_bus = signal_bus self._core_service = core_service @property def route_family(self): return self.__class__.ROUTE_FAMILY @property def core_service(self): return self._core_service @property def scope_id(self): return self._scope_id @abstractmethod def _create_dest(self, nlri): """Creates destination specific for this table. Returns destination that stores information of paths to *nlri*. """ raise NotImplementedError() def values(self): return iter(self._destinations.values()) def insert(self, path): self._validate_path(path) self._validate_nlri(path.nlri) if path.is_withdraw: updated_dest = self._insert_withdraw(path) else: updated_dest = self._insert_path(path) return updated_dest def insert_sent_route(self, sent_route): self._validate_path(sent_route.path) dest = self._get_or_create_dest(sent_route.path.nlri) dest.add_sent_route(sent_route) def _insert_path(self, path): """Add new path to destination identified by given prefix. """ assert path.is_withdraw is False dest = self._get_or_create_dest(path.nlri) # Add given path to matching Dest. dest.add_new_path(path) # Return updated destination. return dest def _insert_withdraw(self, path): """Appends given path to withdraw list of Destination for given prefix. """ assert path.is_withdraw is True dest = self._get_or_create_dest(path.nlri) # Add given path to matching destination. 
dest.add_withdraw(path) # Return updated destination. return dest def cleanup_paths_for_peer(self, peer): """Remove old paths from whose source is `peer` Old paths have source version number that is less than current peer version number. Also removes sent paths to this peer. """ LOG.debug('Cleaning paths from table %s for peer %s', self, peer) for dest in self.values(): # Remove paths learned from this source paths_deleted = dest.remove_old_paths_from_source(peer) # Remove sent paths to this peer had_sent = dest.remove_sent_route(peer) if had_sent: LOG.debug('Removed sent route %s for %s', dest.nlri, peer) # If any paths are removed we enqueue respective destination for # future processing. if paths_deleted: self._signal_bus.dest_changed(dest) def clean_uninteresting_paths(self, interested_rts): """Cleans table of any path that do not have any RT in common with `interested_rts`. Parameters: - `interested_rts`: (set) of RT that are of interest/that need to be preserved """ LOG.debug('Cleaning table %s for given interested RTs %s', self, interested_rts) uninteresting_dest_count = 0 for dest in self.values(): added_withdraw = \ dest.withdraw_uninteresting_paths(interested_rts) if added_withdraw: self._signal_bus.dest_changed(dest) uninteresting_dest_count += 1 return uninteresting_dest_count def delete_dest_by_nlri(self, nlri): """Deletes the destination identified by given prefix. Returns the deleted destination if a match is found. If not match is found return None. """ self._validate_nlri(nlri) dest = self._get_dest(nlri) if dest: self._destinations.pop(dest) return dest def delete_dest(self, dest): del self._destinations[self._table_key(dest.nlri)] def _validate_nlri(self, nlri): """Validated *nlri* is the type that this table stores/supports. """ if not nlri or not (nlri.ROUTE_FAMILY == self.route_family): raise ValueError('Invalid Vpnv4 prefix given.') def _validate_path(self, path): """Check if given path is an instance of *Path*. Raises ValueError if given is not a instance of *Path*. """ if not path or not (path.route_family == self.route_family): raise ValueError('Invalid path. Expected instance of' ' Vpnv4 route family path, got %s.' % path) def _get_or_create_dest(self, nlri): table_key = self._table_key(nlri) dest = self._destinations.get(table_key) # If destination for given prefix does not exist we create it. if dest is None: dest = self._create_dest(nlri) self._destinations[table_key] = dest return dest def _get_dest(self, nlri): table_key = self._table_key(nlri) dest = self._destinations.get(table_key) return dest def is_for_vrf(self): """Returns true if this table instance represents a VRF. """ return self.scope_id is not None def __str__(self): return 'Table(scope_id: %s, rf: %s)' % (self.scope_id, self.route_family) @abstractmethod def _table_key(self, nlri): """Return a key that will uniquely identify this NLRI inside this table. """ raise NotImplementedError() class NonVrfPathProcessingMixin(object): """Mixin reacting to best-path selection algorithm on main table level. Intended to use with "Destination" subclasses. Applies to most of Destinations except for VrfDest because they are processed at VRF level, so different logic applies. """ def __init__(self): self._core_service = None # not assigned yet self._known_path_list = [] def _best_path_lost(self): self._best_path = None if self._sent_routes: # We have to send update-withdraw to all peers to whom old best # path was sent. 
for sent_route in self._sent_routes.values(): sent_path = sent_route.path withdraw_clone = sent_path.clone(for_withdrawal=True) outgoing_route = OutgoingRoute(withdraw_clone) sent_route.sent_peer.enque_outgoing_msg(outgoing_route) LOG.debug('Sending withdrawal to %s for %s', sent_route.sent_peer, outgoing_route) # Have to clear sent_route list for this destination as # best path is removed. self._sent_routes = {} def _new_best_path(self, new_best_path): old_best_path = self._best_path self._best_path = new_best_path LOG.debug('New best path selected for destination %s', self) # If old best path was withdrawn if (old_best_path and old_best_path not in self._known_path_list and self._sent_routes): # Have to clear sent_route list for this destination as # best path is removed. self._sent_routes = {} # Communicate that we have new best path to all qualifying # bgp-peers. pm = self._core_service.peer_manager pm.comm_new_best_to_bgp_peers(new_best_path) # withdraw old best path if old_best_path and self._sent_routes: for sent_route in self._sent_routes.values(): sent_path = sent_route.path withdraw_clone = sent_path.clone(for_withdrawal=True) outgoing_route = OutgoingRoute(withdraw_clone) sent_route.sent_peer.enque_outgoing_msg(outgoing_route) LOG.debug('Sending withdrawal to %s for %s', sent_route.sent_peer, outgoing_route) self._sent_routes = {} @six.add_metaclass(ABCMeta) class Destination(object): """State about a particular destination. For example, an IP prefix. This is the data-structure that is hung of the a routing information base table *Table*. """ ROUTE_FAMILY = RF_IPv4_UC def __init__(self, table, nlri): # Validate arguments. if table.route_family != self.__class__.ROUTE_FAMILY: raise ValueError('Table and destination route family ' 'do not match.') # Back-pointer to the table that contains this destination. self._table = table self._core_service = table.core_service self._nlri = nlri # List of all known processed paths, self._known_path_list = [] # List of new un-processed paths. self._new_path_list = [] # Pointer to best-path. One from the the known paths. self._best_path = None # Reason current best path was chosen as best path. self._best_path_reason = None # List of withdrawn paths. self._withdraw_list = [] # List of SentRoute objects. This is the Adj-Rib-Out for this # destination. (key/value: peer/sent_route) self._sent_routes = {} # This is an (optional) list of paths that were created as a # result of exporting this route to other tables. # self.exported_paths = None # Automatically generated # # On work queue for BGP processor. 
# self.next_dest_to_process # self.prev_dest_to_process @property def route_family(self): return self.__class__.ROUTE_FAMILY @property def nlri(self): return self._nlri @property def nlri_str(self): return self._nlri.formatted_nlri_str @property def best_path(self): return self._best_path @property def best_path_reason(self): return self._best_path_reason @property def known_path_list(self): return self._known_path_list[:] @property def sent_routes(self): return list(self._sent_routes.values()) def add_new_path(self, new_path): self._validate_path(new_path) self._new_path_list.append(new_path) def add_withdraw(self, withdraw): self._validate_path(withdraw) self._withdraw_list.append(withdraw) def add_sent_route(self, sent_route): self._sent_routes[sent_route.sent_peer] = sent_route def remove_sent_route(self, peer): if self.was_sent_to(peer): del self._sent_routes[peer] return True return False def was_sent_to(self, peer): if peer in self._sent_routes.keys(): return True return False def _process(self): """Calculate best path for this destination. A destination is processed when known paths to this destination has changed. We might have new paths or withdrawals of last known paths. Removes withdrawals and adds new learned paths from known path list. Uses bgp best-path calculation algorithm on new list of known paths to choose new best-path. Communicates best-path to core service. """ LOG.debug('Processing destination: %s', self) new_best_path, reason = self._process_paths() self._best_path_reason = reason if self._best_path == new_best_path: return if new_best_path is None: # we lost best path assert not self._known_path_list, repr(self._known_path_list) return self._best_path_lost() else: return self._new_best_path(new_best_path) @abstractmethod def _best_path_lost(self): raise NotImplementedError() @abstractmethod def _new_best_path(self, new_best_path): raise NotImplementedError() @classmethod def _validate_path(cls, path): if not path or path.route_family != cls.ROUTE_FAMILY: raise ValueError( 'Invalid path. Expected %s path got %s' % (cls.ROUTE_FAMILY, path) ) def process(self): self._process() if not self._known_path_list and not self._best_path: self._remove_dest_from_table() def _remove_dest_from_table(self): self._table.delete_dest(self) def remove_old_paths_from_source(self, source): """Removes known old paths from *source*. Returns *True* if any of the known paths were found to be old and removed/deleted. """ assert(source and hasattr(source, 'version_num')) removed_paths = [] # Iterate over the paths in reverse order as we want to delete paths # whose source is this peer. source_ver_num = source.version_num for path_idx in range(len(self._known_path_list) - 1, -1, -1): path = self._known_path_list[path_idx] if (path.source == source and path.source_version_num < source_ver_num): # If this peer is source of any paths, remove those path. del(self._known_path_list[path_idx]) removed_paths.append(path) return removed_paths def withdraw_if_sent_to(self, peer): """Sends a withdraw for this destination to given `peer`. Check the records if we indeed advertise this destination to given peer and if so, creates a withdraw for advertised route and sends it to the peer. 
Parameter: - `peer`: (Peer) peer to send withdraw to """ from ryu.services.protocols.bgp.peer import Peer if not isinstance(peer, Peer): raise TypeError('Currently we only support sending withdrawal' ' to instance of peer') sent_route = self._sent_routes.pop(peer, None) if not sent_route: return False sent_path = sent_route.path withdraw_clone = sent_path.clone(for_withdrawal=True) outgoing_route = OutgoingRoute(withdraw_clone) sent_route.sent_peer.enque_outgoing_msg(outgoing_route) return True def _process_paths(self): """Calculates best-path among known paths for this destination. Returns: - Best path Modifies destination's state related to stored paths. Removes withdrawn paths from known paths. Also, adds new paths to known paths. """ # First remove the withdrawn paths. # Note: If we want to support multiple paths per destination we may # have to maintain sent-routes per path. self._remove_withdrawals() # Have to select best-path from available paths and new paths. # If we do not have any paths, then we no longer have best path. if not self._known_path_list and len(self._new_path_list) == 1: # If we do not have any old but one new path # it becomes best path. self._known_path_list.append(self._new_path_list[0]) del(self._new_path_list[0]) return self._known_path_list[0], BPR_ONLY_PATH # If we have a new version of old/known path we use it and delete old # one. self._remove_old_paths() # Collect all new paths into known paths. self._known_path_list.extend(self._new_path_list) # Clear new paths as we copied them. del(self._new_path_list[:]) # If we do not have any paths to this destination, then we do not have # new best path. if not self._known_path_list: return None, BPR_UNKNOWN # Compute new best path current_best_path, reason = self._compute_best_known_path() return current_best_path, reason def _remove_withdrawals(self): """Removes withdrawn paths. Note: We may have disproportionate number of withdraws compared to know paths since not all paths get installed into the table due to bgp policy and we can receive withdraws for such paths and withdrawals may not be stopped by the same policies. """ LOG.debug('Removing %s withdrawals', len(self._withdraw_list)) # If we have no withdrawals, we have nothing to do. if not self._withdraw_list: return # If we have some withdrawals and no know-paths, it means it is safe to # delete these withdraws. if not self._known_path_list: LOG.debug('Found %s withdrawals for path(s) that did not get' ' installed.', len(self._withdraw_list)) del(self._withdraw_list[:]) return # If we have some known paths and some withdrawals, we find matches and # delete them first. matches = set() w_matches = set() # Match all withdrawals from destination paths. for withdraw in self._withdraw_list: match = None for path in self._known_path_list: # We have a match if the source are same. if path.source == withdraw.source: match = path matches.add(path) w_matches.add(withdraw) # One withdraw can remove only one path. break # We do no have any match for this withdraw. if not match: LOG.debug('No matching path for withdraw found, may be path ' 'was not installed into table: %s', withdraw) # If we have partial match. if len(matches) != len(self._withdraw_list): LOG.debug('Did not find match for some withdrawals. Number of ' 'matches(%s), number of withdrawals (%s)', len(matches), len(self._withdraw_list)) # Clear matching paths and withdrawals. 
for match in matches: self._known_path_list.remove(match) for w_match in w_matches: self._withdraw_list.remove(w_match) def _remove_old_paths(self): """Identifies which of known paths are old and removes them. Known paths will no longer have paths whose new version is present in new paths. """ new_paths = self._new_path_list known_paths = self._known_path_list for new_path in new_paths: old_paths = [] for path in known_paths: # Here we just check if source is same and not check if path # version num. as new_paths are implicit withdrawal of old # paths and when doing RouteRefresh (not EnhancedRouteRefresh) # we get same paths again. if new_path.source == path.source: old_paths.append(path) break for old_path in old_paths: known_paths.remove(old_path) LOG.debug('Implicit withdrawal of old path, since we have' ' learned new path from same source: %s', old_path) def _compute_best_known_path(self): """Computes the best path among known paths. Returns current best path among `known_paths`. """ if not self._known_path_list: from ryu.services.protocols.bgp.processor import BgpProcessorError raise BgpProcessorError(desc='Need at-least one known path to' ' compute best path') # We pick the first path as current best path. This helps in breaking # tie between two new paths learned in one cycle for which best-path # calculation steps lead to tie. current_best_path = self._known_path_list[0] best_path_reason = BPR_ONLY_PATH for next_path in self._known_path_list[1:]: from ryu.services.protocols.bgp.processor import compute_best_path # Compare next path with current best path. new_best_path, reason = \ compute_best_path(self._core_service.asn, current_best_path, next_path) best_path_reason = reason if new_best_path is not None: current_best_path = new_best_path return current_best_path, best_path_reason def withdraw_uninteresting_paths(self, interested_rts): """Withdraws paths that are no longer interesting. For all known paths that do not have any route target in common with given `interested_rts` we add a corresponding withdraw. Returns True if we added any withdraws. """ add_withdraws = False for path in self._known_path_list: if not path.has_rts_in(interested_rts): self.withdraw_path(path) add_withdraws = True return add_withdraws def withdraw_path(self, path): if path not in self.known_path_list: raise ValueError("Path not known, no need to withdraw") withdraw = path.clone(for_withdrawal=True) self._withdraw_list.append(withdraw) def to_dict(self): return {'table': str(self._table), 'nlri': str(self._nlri), 'paths': self._known_path_list[:], 'withdraws': self._get_num_withdraws()} def __str__(self): return ('Destination(table: %s, nlri: %s, paths: %s, withdraws: %s,' ' new paths: %s)' % (self._table, str(self._nlri), len(self._known_path_list), len(self._withdraw_list), len(self._new_path_list))) def _get_num_valid_paths(self): return len(self._known_path_list) def _get_num_withdraws(self): return len(self._withdraw_list) def sent_routes_by_peer(self, peer): """get sent routes corresponding to specified peer. Returns SentRoute list. 
""" result = [] for route in self._sent_routes.values(): if route.sent_peer == peer: result.append(route) return result def __lt__(self, other): return str(self) < str(other) def __le__(self, other): return str(self) <= str(other) def __eq__(self, other): return str(self) == str(other) def __ne__(self, other): return str(self) != str(other) def __gt__(self, other): return str(self) > str(other) def __ge__(self, other): return str(self) >= str(other) @six.add_metaclass(ABCMeta) class Path(object): """Represents a way of reaching an IP destination. Also contains other meta-data given to us by a specific source (such as a peer). """ __slots__ = ('_source', '_path_attr_map', '_nlri', '_source_version_num', '_exported_from', '_nexthop', 'next_path', 'prev_path', '_is_withdraw', 'med_set_by_target_neighbor') ROUTE_FAMILY = RF_IPv4_UC def __init__(self, source, nlri, src_ver_num, pattrs=None, nexthop=None, is_withdraw=False, med_set_by_target_neighbor=False): """Initializes Ipv4 path. If this path is not a withdraw, then path attribute and nexthop both should be provided. Parameters: - `source`: (Peer/str) source of this path. - `nlri`: (Vpnv4) Nlri instance for Vpnv4 route family. - `src_ver_num`: (int) version number of *source* when this path was learned. - `pattrs`: (OrderedDict) various path attributes for this path. - `nexthop`: (str) nexthop advertised for this path. - `is_withdraw`: (bool) True if this represents a withdrawal. """ self.med_set_by_target_neighbor = med_set_by_target_neighbor if nlri.ROUTE_FAMILY != self.__class__.ROUTE_FAMILY: raise ValueError('NLRI and Path route families do not' ' match (%s, %s).' % (nlri.ROUTE_FAMILY, self.__class__.ROUTE_FAMILY)) # Currently paths injected directly into VRF has only one source # src_peer can be None to denote NC else has to be instance of Peer. # Paths can be exported from one VRF and then imported into another # VRF, in such cases it source is denoted as string VPN_TABLE. if not (source is None or hasattr(source, 'version_num') or source in (VRF_TABLE, VPN_TABLE)): raise ValueError('Invalid or Unsupported source for path: %s' % source) # If this path is not a withdraw path, than it should have path- # attributes and nexthop. if not is_withdraw and not (pattrs and nexthop): raise ValueError('Need to provide nexthop and patattrs ' 'for path that is not a withdraw.') # The entity (peer) that gave us this path. self._source = source # Path attribute of this path. if pattrs: self._path_attr_map = copy(pattrs) else: self._path_attr_map = OrderedDict() # NLRI that this path represents. self._nlri = nlri # If given nlri is withdrawn. self._is_withdraw = is_withdraw # @see Source.version_num self._source_version_num = src_ver_num self._nexthop = nexthop # Automatically generated. # # self.next_path # self.prev_path # The Destination from which this path was exported, if any. self._exported_from = None @property def source_version_num(self): return self._source_version_num @property def source(self): return self._source @property def route_family(self): return self.__class__.ROUTE_FAMILY @property def nlri(self): return self._nlri @property def nlri_str(self): return self._nlri.formatted_nlri_str @property def is_withdraw(self): return self._is_withdraw @property def pathattr_map(self): return copy(self._path_attr_map) @property def nexthop(self): return self._nexthop def get_pattr(self, pattr_type, default=None): """Returns path attribute of given type. Returns None if we do not attribute of type *pattr_type*. 
""" return self._path_attr_map.get(pattr_type, default) def clone(self, for_withdrawal=False): pathattrs = None if not for_withdrawal: pathattrs = self.pathattr_map clone = self.__class__( self.source, self.nlri, self.source_version_num, pattrs=pathattrs, nexthop=self.nexthop, is_withdraw=for_withdrawal ) return clone def get_rts(self): extcomm_attr = self._path_attr_map.get( BGP_ATTR_TYPE_EXTENDED_COMMUNITIES) if extcomm_attr is None: rts = [] else: rts = extcomm_attr.rt_list return rts def has_rts_in(self, interested_rts): """Returns True if this `Path` has any `ExtCommunity` attribute route target common with `interested_rts`. """ assert isinstance(interested_rts, set) curr_rts = self.get_rts() # Add default RT to path RTs so that we match interest for peers who # advertised default RT curr_rts.append(RouteTargetMembershipNLRI.DEFAULT_RT) return not interested_rts.isdisjoint(curr_rts) def is_local(self): return self._source is None def has_nexthop(self): return self._nexthop and self._nexthop not in ('0.0.0.0', '::') def __str__(self): return ( 'Path(source: %s, nlri: %s, source ver#: %s, ' 'path attrs.: %s, nexthop: %s, is_withdraw: %s)' % ( self._source, self._nlri, self._source_version_num, self._path_attr_map, self._nexthop, self._is_withdraw ) ) def __repr__(self): return ('Path(%s, %s, %s, %s, %s, %s)' % ( self._source, self._nlri, self._source_version_num, self._path_attr_map, self._nexthop, self._is_withdraw)) @six.add_metaclass(ABCMeta) class Filter(object): """Represents a general filter for in-bound and out-bound filter ================ ================================================== Attribute Description ================ ================================================== policy Filter.POLICY_PERMIT or Filter.POLICY_DENY ================ ================================================== """ ROUTE_FAMILY = RF_IPv4_UC POLICY_DENY = 0 POLICY_PERMIT = 1 def __init__(self, policy=POLICY_DENY): self._policy = policy @property def policy(self): return self._policy @abstractmethod def evaluate(self, path): """ This method evaluates the path. Returns this object's policy and the result of matching. If the specified prefix matches this object's prefix and ge and le condition, this method returns True as the matching result. ``path`` specifies the path. prefix must be string. """ raise NotImplementedError() @abstractmethod def clone(self): """ This method clones Filter object. Returns Filter object that has the same values with the original one. """ raise NotImplementedError() @functools.total_ordering class PrefixFilter(Filter): """ Used to specify a prefix for filter. We can create PrefixFilter object as follows:: prefix_filter = PrefixFilter('10.5.111.0/24', policy=PrefixFilter.POLICY_PERMIT) ================ ================================================== Attribute Description ================ ================================================== prefix A prefix used for this filter policy One of the following values. | PrefixFilter.POLICY.PERMIT | PrefixFilter.POLICY_DENY ge Prefix length that will be applied to this filter. ge means greater than or equal. le Prefix length that will be applied to this filter. le means less than or equal. ================ ================================================== For example, when PrefixFilter object is created as follows:: p = PrefixFilter('10.5.111.0/24', policy=PrefixFilter.POLICY_DENY, ge=26, le=28) Prefixes which match 10.5.111.0/24 and its length matches from 26 to 28 will be filtered. 
When this filter is used as an out-filter, it will stop sending the path to neighbor because of POLICY_DENY. When this filter is used as in-filter, it will stop importing the path to the global rib because of POLICY_DENY. If you specify POLICY_PERMIT, the path is sent to neighbor or imported to the global rib. If you don't want to send prefixes 10.5.111.64/26 and 10.5.111.32/27 and 10.5.111.16/28, and allow to send other 10.5.111.0's prefixes, you can do it by specifying as follows:: p = PrefixFilter('10.5.111.0/24', policy=PrefixFilter.POLICY_DENY, ge=26, le=28). """ def __init__(self, prefix, policy, ge=None, le=None): super(PrefixFilter, self).__init__(policy) self._prefix = prefix self._network = netaddr.IPNetwork(prefix) self._ge = ge self._le = le def __lt__(self, other): return self._network < other._network def __eq__(self, other): return self._network == other._network def __repr__(self): policy = 'PERMIT' \ if self._policy == self.POLICY_PERMIT else 'DENY' return 'PrefixFilter(prefix=%s,policy=%s,ge=%s,le=%s)'\ % (self._prefix, policy, self._ge, self._le) @property def prefix(self): return self._prefix @property def policy(self): return self._policy @property def ge(self): return self._ge @property def le(self): return self._le def evaluate(self, path): """ This method evaluates the prefix. Returns this object's policy and the result of matching. If the specified prefix matches this object's prefix and ge and le condition, this method returns True as the matching result. ``path`` specifies the path that has prefix. """ nlri = path.nlri result = False length = nlri.length net = netaddr.IPNetwork(nlri.prefix) if net in self._network: if self._ge is None and self._le is None: result = True elif self._ge is None and self._le: if length <= self._le: result = True elif self._ge and self._le is None: if self._ge <= length: result = True elif self._ge and self._le: if self._ge <= length <= self._le: result = True return self.policy, result def clone(self): """ This method clones PrefixFilter object. Returns PrefixFilter object that has the same values with the original one. """ return self.__class__(self.prefix, policy=self._policy, ge=self._ge, le=self._le) @functools.total_ordering class ASPathFilter(Filter): """ Used to specify a prefix for AS_PATH attribute. We can create ASPathFilter object as follows:: as_path_filter = ASPathFilter(65000,policy=ASPathFilter.TOP) ================ ================================================== Attribute Description ================ ================================================== as_number A AS number used for this filter policy One of the following values. | ASPathFilter.POLICY_TOP | ASPathFilter.POLICY_END | ASPathFilter.POLICY_INCLUDE | ASPathFilter.POLICY_NOT_INCLUDE ================ ================================================== Meaning of each policy is as follows: ================== ================================================== Policy Description ================== ================================================== POLICY_TOP Filter checks if the specified AS number is at the top of AS_PATH attribute. POLICY_END Filter checks is the specified AS number is at the last of AS_PATH attribute. POLICY_INCLUDE Filter checks if specified AS number exists in AS_PATH attribute. POLICY_NOT_INCLUDE Opposite to POLICY_INCLUDE. 
================== ================================================== """ POLICY_TOP = 2 POLICY_END = 3 POLICY_INCLUDE = 4 POLICY_NOT_INCLUDE = 5 def __init__(self, as_number, policy): super(ASPathFilter, self).__init__(policy) self._as_number = as_number def __lt__(self, other): return self.as_number < other.as_number def __eq__(self, other): return self.as_number == other.as_number def __repr__(self): policy = 'TOP' if self._policy == self.POLICY_INCLUDE: policy = 'INCLUDE' elif self._policy == self.POLICY_NOT_INCLUDE: policy = 'NOT_INCLUDE' elif self._policy == self.POLICY_END: policy = 'END' return 'ASPathFilter(as_number=%s,policy=%s)'\ % (self._as_number, policy) @property def as_number(self): return self._as_number @property def policy(self): return self._policy def evaluate(self, path): """ This method evaluates as_path list. Returns this object's policy and the result of matching. If the specified AS number matches this object's AS number according to the policy, this method returns True as the matching result. ``path`` specifies the path. """ path_aspath = path.pathattr_map.get(BGP_ATTR_TYPE_AS_PATH) path_seg_list = path_aspath.path_seg_list if path_seg_list: path_seg = path_seg_list[0] else: path_seg = [] result = False LOG.debug("path_seg : %s", path_seg) if self.policy == ASPathFilter.POLICY_TOP: if len(path_seg) > 0 and path_seg[0] == self._as_number: result = True elif self.policy == ASPathFilter.POLICY_INCLUDE: for aspath in path_seg: LOG.debug("POLICY_INCLUDE as_number : %s", aspath) if aspath == self._as_number: result = True break elif self.policy == ASPathFilter.POLICY_END: if len(path_seg) > 0 and path_seg[-1] == self._as_number: result = True elif self.policy == ASPathFilter.POLICY_NOT_INCLUDE: if self._as_number not in path_seg: result = True return self.policy, result def clone(self): """ This method clones ASPathFilter object. Returns ASPathFilter object that has the same values with the original one. """ return self.__class__(self._as_number, policy=self._policy) class AttributeMap(object): """ This class is used to specify an attribute to add if the path matches filters. We can create AttributeMap object as follows:: pref_filter = PrefixFilter('192.168.103.0/30', PrefixFilter.POLICY_PERMIT) attribute_map = AttributeMap([pref_filter], AttributeMap.ATTR_LOCAL_PREF, 250) speaker.attribute_map_set('192.168.50.102', [attribute_map]) AttributeMap.ATTR_LOCAL_PREF means that 250 is set as a local preference value if nlri in the path matches pref_filter. ASPathFilter is also available as a filter. ASPathFilter checks if AS_PATH attribute in the path matches AS number in the filter. =================== ================================================== Attribute Description =================== ================================================== filters A list of filter. Each object should be a Filter class or its sub-class attr_type A type of attribute to map on filters. Currently AttributeMap.ATTR_LOCAL_PREF is available. attr_value A attribute value =================== ================================================== """ ATTR_LOCAL_PREF = '_local_pref' def __init__(self, filters, attr_type, attr_value): assert all(isinstance(f, Filter) for f in filters),\ 'all the items in filters must be an instance of Filter sub-class' self.filters = filters self.attr_type = attr_type self.attr_value = attr_value def evaluate(self, path): """ This method evaluates attributes of the path. Returns the cause and result of matching. 
Both cause and result are returned from filters that this object contains. ``path`` specifies the path. """ result = False cause = None for f in self.filters: cause, result = f.evaluate(path) if not result: break return cause, result def get_attribute(self): func = getattr(self, 'get' + self.attr_type) return func() def get_local_pref(self): local_pref_attr = BGPPathAttributeLocalPref(value=self.attr_value) return local_pref_attr def clone(self): """ This method clones AttributeMap object. Returns AttributeMap object that has the same values with the original one. """ cloned_filters = [f.clone() for f in self.filters] return self.__class__(cloned_filters, self.attr_type, self.attr_value) def __repr__(self): attr_type = 'LOCAL_PREF'\ if self.attr_type == self.ATTR_LOCAL_PREF else None filter_string = ','.join(repr(f) for f in self.filters) return ('AttributeMap(filters=[%s],' 'attribute_type=%s,' 'attribute_value=%s)' % (filter_string, attr_type, self.attr_value))
apache-2.0
-943,357,946,213,218,200
33.517131
79
0.573974
false
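A short usage sketch for the filter classes defined in the record above (ryu/services/protocols/bgp/info_base/base.py). It only mirrors constructions already shown in the docstrings; the BGPSpeaker that would eventually consume the AttributeMap is assumed and not shown, and the variable names are illustrative.

from ryu.services.protocols.bgp.info_base.base import (
    ASPathFilter, AttributeMap, PrefixFilter)

# Deny prefixes under 10.5.111.0/24 whose length is between /26 and /28.
deny_small_blocks = PrefixFilter('10.5.111.0/24',
                                 policy=PrefixFilter.POLICY_DENY,
                                 ge=26, le=28)

# Match paths whose AS_PATH attribute starts with AS 65000.
top_as_65000 = ASPathFilter(65000, policy=ASPathFilter.POLICY_TOP)

# Attach LOCAL_PREF 250 to paths matching 192.168.103.0/30, as in the
# AttributeMap docstring; every filter passed in must be a Filter subclass.
pref_filter = PrefixFilter('192.168.103.0/30', PrefixFilter.POLICY_PERMIT)
local_pref_250 = AttributeMap([pref_filter],
                              AttributeMap.ATTR_LOCAL_PREF, 250)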
Alberto-Beralix/Beralix
i386-squashfs-root/usr/share/pyshared/twisted/cred/credentials.py
30
15450
# -*- test-case-name: twisted.test.test_newcred-*- # Copyright (c) Twisted Matrix Laboratories. # See LICENSE for details. from zope.interface import implements, Interface import hmac, time, random from twisted.python.hashlib import md5 from twisted.python.randbytes import secureRandom from twisted.cred._digest import calcResponse, calcHA1, calcHA2 from twisted.cred import error class ICredentials(Interface): """ I check credentials. Implementors _must_ specify which sub-interfaces of ICredentials to which it conforms, using zope.interface.implements(). """ class IUsernameDigestHash(ICredentials): """ This credential is used when a CredentialChecker has access to the hash of the username:realm:password as in an Apache .htdigest file. """ def checkHash(digestHash): """ @param digestHash: The hashed username:realm:password to check against. @return: C{True} if the credentials represented by this object match the given hash, C{False} if they do not, or a L{Deferred} which will be called back with one of these values. """ class IUsernameHashedPassword(ICredentials): """ I encapsulate a username and a hashed password. This credential is used when a hashed password is received from the party requesting authentication. CredentialCheckers which check this kind of credential must store the passwords in plaintext (or as password-equivalent hashes) form so that they can be hashed in a manner appropriate for the particular credentials class. @type username: C{str} @ivar username: The username associated with these credentials. """ def checkPassword(password): """ Validate these credentials against the correct password. @type password: C{str} @param password: The correct, plaintext password against which to check. @rtype: C{bool} or L{Deferred} @return: C{True} if the credentials represented by this object match the given password, C{False} if they do not, or a L{Deferred} which will be called back with one of these values. """ class IUsernamePassword(ICredentials): """ I encapsulate a username and a plaintext password. This encapsulates the case where the password received over the network has been hashed with the identity function (That is, not at all). The CredentialsChecker may store the password in whatever format it desires, it need only transform the stored password in a similar way before performing the comparison. @type username: C{str} @ivar username: The username associated with these credentials. @type password: C{str} @ivar password: The password associated with these credentials. """ def checkPassword(password): """ Validate these credentials against the correct password. @type password: C{str} @param password: The correct, plaintext password against which to check. @rtype: C{bool} or L{Deferred} @return: C{True} if the credentials represented by this object match the given password, C{False} if they do not, or a L{Deferred} which will be called back with one of these values. """ class IAnonymous(ICredentials): """ I am an explicitly anonymous request for access. """ class DigestedCredentials(object): """ Yet Another Simple HTTP Digest authentication scheme. 
""" implements(IUsernameHashedPassword, IUsernameDigestHash) def __init__(self, username, method, realm, fields): self.username = username self.method = method self.realm = realm self.fields = fields def checkPassword(self, password): """ Verify that the credentials represented by this object agree with the given plaintext C{password} by hashing C{password} in the same way the response hash represented by this object was generated and comparing the results. """ response = self.fields.get('response') uri = self.fields.get('uri') nonce = self.fields.get('nonce') cnonce = self.fields.get('cnonce') nc = self.fields.get('nc') algo = self.fields.get('algorithm', 'md5').lower() qop = self.fields.get('qop', 'auth') expected = calcResponse( calcHA1(algo, self.username, self.realm, password, nonce, cnonce), calcHA2(algo, self.method, uri, qop, None), algo, nonce, nc, cnonce, qop) return expected == response def checkHash(self, digestHash): """ Verify that the credentials represented by this object agree with the credentials represented by the I{H(A1)} given in C{digestHash}. @param digestHash: A precomputed H(A1) value based on the username, realm, and password associate with this credentials object. """ response = self.fields.get('response') uri = self.fields.get('uri') nonce = self.fields.get('nonce') cnonce = self.fields.get('cnonce') nc = self.fields.get('nc') algo = self.fields.get('algorithm', 'md5').lower() qop = self.fields.get('qop', 'auth') expected = calcResponse( calcHA1(algo, None, None, None, nonce, cnonce, preHA1=digestHash), calcHA2(algo, self.method, uri, qop, None), algo, nonce, nc, cnonce, qop) return expected == response class DigestCredentialFactory(object): """ Support for RFC2617 HTTP Digest Authentication @cvar CHALLENGE_LIFETIME_SECS: The number of seconds for which an opaque should be valid. @type privateKey: C{str} @ivar privateKey: A random string used for generating the secure opaque. @type algorithm: C{str} @param algorithm: Case insensitive string specifying the hash algorithm to use. Must be either C{'md5'} or C{'sha'}. C{'md5-sess'} is B{not} supported. @type authenticationRealm: C{str} @param authenticationRealm: case sensitive string that specifies the realm portion of the challenge """ CHALLENGE_LIFETIME_SECS = 15 * 60 # 15 minutes scheme = "digest" def __init__(self, algorithm, authenticationRealm): self.algorithm = algorithm self.authenticationRealm = authenticationRealm self.privateKey = secureRandom(12) def getChallenge(self, address): """ Generate the challenge for use in the WWW-Authenticate header. @param address: The client address to which this challenge is being sent. @return: The C{dict} that can be used to generate a WWW-Authenticate header. """ c = self._generateNonce() o = self._generateOpaque(c, address) return {'nonce': c, 'opaque': o, 'qop': 'auth', 'algorithm': self.algorithm, 'realm': self.authenticationRealm} def _generateNonce(self): """ Create a random value suitable for use as the nonce parameter of a WWW-Authenticate challenge. @rtype: C{str} """ return secureRandom(12).encode('hex') def _getTime(self): """ Parameterize the time based seed used in C{_generateOpaque} so we can deterministically unittest it's behavior. """ return time.time() def _generateOpaque(self, nonce, clientip): """ Generate an opaque to be returned to the client. This is a unique string that can be returned to us and verified. """ # Now, what we do is encode the nonce, client ip and a timestamp in the # opaque value with a suitable digest. 
now = str(int(self._getTime())) if clientip is None: clientip = '' key = "%s,%s,%s" % (nonce, clientip, now) digest = md5(key + self.privateKey).hexdigest() ekey = key.encode('base64') return "%s-%s" % (digest, ekey.replace('\n', '')) def _verifyOpaque(self, opaque, nonce, clientip): """ Given the opaque and nonce from the request, as well as the client IP that made the request, verify that the opaque was generated by us. And that it's not too old. @param opaque: The opaque value from the Digest response @param nonce: The nonce value from the Digest response @param clientip: The remote IP address of the client making the request or C{None} if the request was submitted over a channel where this does not make sense. @return: C{True} if the opaque was successfully verified. @raise error.LoginFailed: if C{opaque} could not be parsed or contained the wrong values. """ # First split the digest from the key opaqueParts = opaque.split('-') if len(opaqueParts) != 2: raise error.LoginFailed('Invalid response, invalid opaque value') if clientip is None: clientip = '' # Verify the key key = opaqueParts[1].decode('base64') keyParts = key.split(',') if len(keyParts) != 3: raise error.LoginFailed('Invalid response, invalid opaque value') if keyParts[0] != nonce: raise error.LoginFailed( 'Invalid response, incompatible opaque/nonce values') if keyParts[1] != clientip: raise error.LoginFailed( 'Invalid response, incompatible opaque/client values') try: when = int(keyParts[2]) except ValueError: raise error.LoginFailed( 'Invalid response, invalid opaque/time values') if (int(self._getTime()) - when > DigestCredentialFactory.CHALLENGE_LIFETIME_SECS): raise error.LoginFailed( 'Invalid response, incompatible opaque/nonce too old') # Verify the digest digest = md5(key + self.privateKey).hexdigest() if digest != opaqueParts[0]: raise error.LoginFailed('Invalid response, invalid opaque value') return True def decode(self, response, method, host): """ Decode the given response and attempt to generate a L{DigestedCredentials} from it. @type response: C{str} @param response: A string of comma seperated key=value pairs @type method: C{str} @param method: The action requested to which this response is addressed (GET, POST, INVITE, OPTIONS, etc). @type host: C{str} @param host: The address the request was sent from. @raise error.LoginFailed: If the response does not contain a username, a nonce, an opaque, or if the opaque is invalid. 
@return: L{DigestedCredentials} """ def unq(s): if s[0] == s[-1] == '"': return s[1:-1] return s response = ' '.join(response.splitlines()) parts = response.split(',') auth = {} for (k, v) in [p.split('=', 1) for p in parts]: auth[k.strip()] = unq(v.strip()) username = auth.get('username') if not username: raise error.LoginFailed('Invalid response, no username given.') if 'opaque' not in auth: raise error.LoginFailed('Invalid response, no opaque given.') if 'nonce' not in auth: raise error.LoginFailed('Invalid response, no nonce given.') # Now verify the nonce/opaque values for this client if self._verifyOpaque(auth.get('opaque'), auth.get('nonce'), host): return DigestedCredentials(username, method, self.authenticationRealm, auth) class CramMD5Credentials: implements(IUsernameHashedPassword) challenge = '' response = '' def __init__(self, host=None): self.host = host def getChallenge(self): if self.challenge: return self.challenge # The data encoded in the first ready response contains an # presumptively arbitrary string of random digits, a timestamp, and # the fully-qualified primary host name of the server. The syntax of # the unencoded form must correspond to that of an RFC 822 'msg-id' # [RFC822] as described in [POP3]. # -- RFC 2195 r = random.randrange(0x7fffffff) t = time.time() self.challenge = '<%d.%d@%s>' % (r, t, self.host) return self.challenge def setResponse(self, response): self.username, self.response = response.split(None, 1) def moreChallenges(self): return False def checkPassword(self, password): verify = hmac.HMAC(password, self.challenge).hexdigest() return verify == self.response class UsernameHashedPassword: implements(IUsernameHashedPassword) def __init__(self, username, hashed): self.username = username self.hashed = hashed def checkPassword(self, password): return self.hashed == password class UsernamePassword: implements(IUsernamePassword) def __init__(self, username, password): self.username = username self.password = password def checkPassword(self, password): return self.password == password class Anonymous: implements(IAnonymous) class ISSHPrivateKey(ICredentials): """ L{ISSHPrivateKey} credentials encapsulate an SSH public key to be checked against a user's private key. @ivar username: The username associated with these credentials. @type username: C{str} @ivar algName: The algorithm name for the blob. @type algName: C{str} @ivar blob: The public key blob as sent by the client. @type blob: C{str} @ivar sigData: The data the signature was made from. @type sigData: C{str} @ivar signature: The signed data. This is checked to verify that the user owns the private key. @type signature: C{str} or C{NoneType} """ class SSHPrivateKey: implements(ISSHPrivateKey) def __init__(self, username, algName, blob, sigData, signature): self.username = username self.algName = algName self.blob = blob self.sigData = sigData self.signature = signature class IPluggableAuthenticationModules(ICredentials): """I encapsulate the authentication of a user via PAM (Pluggable Authentication Modules. I use PyPAM (available from http://www.tummy.com/Software/PyPam/index.html). @ivar username: The username for the user being logged in. @ivar pamConversion: A function that is called with a list of tuples (message, messageType). See the PAM documentation for the meaning of messageType. The function returns a Deferred which will fire with a list of (response, 0), one for each message. The 0 is currently unused, but is required by the PAM library. 
""" class PluggableAuthenticationModules: implements(IPluggableAuthenticationModules) def __init__(self, username, pamConversion): self.username = username self.pamConversion = pamConversion
gpl-3.0
9,024,818,655,870,962,000
30.987578
80
0.633074
false
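A hedged sketch exercising two of the credential classes from the record above; the portal/checker wiring that normally consumes them is assumed and omitted.

from twisted.cred.credentials import (
    DigestCredentialFactory, IUsernamePassword, UsernamePassword)

creds = UsernamePassword('alice', 'correct horse')
assert IUsernamePassword.providedBy(creds)
assert creds.checkPassword('correct horse')      # plain string comparison
assert not creds.checkPassword('wrong')

# DigestCredentialFactory builds the WWW-Authenticate challenge fields.
factory = DigestCredentialFactory('md5', 'example.org')
challenge = factory.getChallenge('10.0.0.1')
assert challenge['realm'] == 'example.org'
assert set(challenge) == {'nonce', 'opaque', 'qop', 'algorithm', 'realm'}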
aperigault/ansible
test/units/modules/crypto/test_luks_device.py
23
8409
import pytest from ansible.modules.crypto import luks_device class DummyModule(object): # module to mock AnsibleModule class def __init__(self): self.params = dict() def fail_json(self, msg=""): raise ValueError(msg) def get_bin_path(self, command, dummy): return command # ===== Handler & CryptHandler methods tests ===== def test_generate_luks_name(monkeypatch): module = DummyModule() monkeypatch.setattr(luks_device.Handler, "_run_command", lambda x, y: [0, "UUID", ""]) crypt = luks_device.CryptHandler(module) assert crypt.generate_luks_name("/dev/dummy") == "luks-UUID" def test_get_container_name_by_device(monkeypatch): module = DummyModule() monkeypatch.setattr(luks_device.Handler, "_run_command", lambda x, y: [0, "crypt container_name", ""]) crypt = luks_device.CryptHandler(module) assert crypt.get_container_name_by_device("/dev/dummy") == "container_name" def test_get_container_device_by_name(monkeypatch): module = DummyModule() monkeypatch.setattr(luks_device.Handler, "_run_command", lambda x, y: [0, "device: /dev/luksdevice", ""]) crypt = luks_device.CryptHandler(module) assert crypt.get_container_device_by_name("dummy") == "/dev/luksdevice" def test_run_luks_remove(monkeypatch): def run_command_check(self, command): # check that wipefs command is actually called assert command[0] == "wipefs" return [0, "", ""] module = DummyModule() monkeypatch.setattr(luks_device.CryptHandler, "get_container_name_by_device", lambda x, y: None) monkeypatch.setattr(luks_device.Handler, "_run_command", run_command_check) crypt = luks_device.CryptHandler(module) crypt.run_luks_remove("dummy") # ===== ConditionsHandler methods data and tests ===== # device, key, state, is_luks, expected LUKS_CREATE_DATA = ( ("dummy", "key", "present", False, True), (None, "key", "present", False, False), ("dummy", None, "present", False, False), ("dummy", "key", "absent", False, False), ("dummy", "key", "opened", True, False), ("dummy", "key", "closed", True, False), ("dummy", "key", "present", True, False)) # device, state, is_luks, expected LUKS_REMOVE_DATA = ( ("dummy", "absent", True, True), (None, "absent", True, False), ("dummy", "present", True, False), ("dummy", "absent", False, False)) # device, key, state, name, name_by_dev, expected LUKS_OPEN_DATA = ( ("dummy", "key", "present", "name", None, False), ("dummy", "key", "absent", "name", None, False), ("dummy", "key", "closed", "name", None, False), ("dummy", "key", "opened", "name", None, True), (None, "key", "opened", "name", None, False), ("dummy", None, "opened", "name", None, False), ("dummy", "key", "opened", "name", "name", False), ("dummy", "key", "opened", "beer", "name", "exception")) # device, dev_by_name, name, name_by_dev, state, expected LUKS_CLOSE_DATA = ( ("dummy", "dummy", "name", "name", "present", False), ("dummy", "dummy", "name", "name", "absent", False), ("dummy", "dummy", "name", "name", "opened", False), ("dummy", "dummy", "name", "name", "closed", True), (None, "dummy", "name", "name", "closed", True), ("dummy", "dummy", None, "name", "closed", True), (None, "dummy", None, "name", "closed", False)) # device, key, new_key, state, expected LUKS_ADD_KEY_DATA = ( ("dummy", "key", "new_key", "present", True), (None, "key", "new_key", "present", False), ("dummy", None, "new_key", "present", False), ("dummy", "key", None, "present", False), ("dummy", "key", "new_key", "absent", "exception")) # device, remove_key, state, expected LUKS_REMOVE_KEY_DATA = ( ("dummy", "key", "present", True), (None, "key", "present", False), ("dummy", None, 
"present", False), ("dummy", "key", "absent", "exception")) @pytest.mark.parametrize("device, keyfile, state, is_luks, expected", ((d[0], d[1], d[2], d[3], d[4]) for d in LUKS_CREATE_DATA)) def test_luks_create(device, keyfile, state, is_luks, expected, monkeypatch): module = DummyModule() module.params["device"] = device module.params["keyfile"] = keyfile module.params["state"] = state monkeypatch.setattr(luks_device.CryptHandler, "is_luks", lambda x, y: is_luks) crypt = luks_device.CryptHandler(module) conditions = luks_device.ConditionsHandler(module, crypt) assert conditions.luks_create() == expected @pytest.mark.parametrize("device, state, is_luks, expected", ((d[0], d[1], d[2], d[3]) for d in LUKS_REMOVE_DATA)) def test_luks_remove(device, state, is_luks, expected, monkeypatch): module = DummyModule() module.params["device"] = device module.params["state"] = state monkeypatch.setattr(luks_device.CryptHandler, "is_luks", lambda x, y: is_luks) crypt = luks_device.CryptHandler(module) conditions = luks_device.ConditionsHandler(module, crypt) assert conditions.luks_remove() == expected @pytest.mark.parametrize("device, keyfile, state, name, " "name_by_dev, expected", ((d[0], d[1], d[2], d[3], d[4], d[5]) for d in LUKS_OPEN_DATA)) def test_luks_open(device, keyfile, state, name, name_by_dev, expected, monkeypatch): module = DummyModule() module.params["device"] = device module.params["keyfile"] = keyfile module.params["state"] = state module.params["name"] = name monkeypatch.setattr(luks_device.CryptHandler, "get_container_name_by_device", lambda x, y: name_by_dev) crypt = luks_device.CryptHandler(module) conditions = luks_device.ConditionsHandler(module, crypt) try: assert conditions.luks_open() == expected except ValueError: assert expected == "exception" @pytest.mark.parametrize("device, dev_by_name, name, name_by_dev, " "state, expected", ((d[0], d[1], d[2], d[3], d[4], d[5]) for d in LUKS_CLOSE_DATA)) def test_luks_close(device, dev_by_name, name, name_by_dev, state, expected, monkeypatch): module = DummyModule() module.params["device"] = device module.params["name"] = name module.params["state"] = state monkeypatch.setattr(luks_device.CryptHandler, "get_container_name_by_device", lambda x, y: name_by_dev) monkeypatch.setattr(luks_device.CryptHandler, "get_container_device_by_name", lambda x, y: dev_by_name) crypt = luks_device.CryptHandler(module) conditions = luks_device.ConditionsHandler(module, crypt) assert conditions.luks_close() == expected @pytest.mark.parametrize("device, keyfile, new_keyfile, state, expected", ((d[0], d[1], d[2], d[3], d[4]) for d in LUKS_ADD_KEY_DATA)) def test_luks_add_key(device, keyfile, new_keyfile, state, expected, monkeypatch): module = DummyModule() module.params["device"] = device module.params["keyfile"] = keyfile module.params["new_keyfile"] = new_keyfile module.params["state"] = state conditions = luks_device.ConditionsHandler(module, module) try: assert conditions.luks_add_key() == expected except ValueError: assert expected == "exception" @pytest.mark.parametrize("device, remove_keyfile, state, expected", ((d[0], d[1], d[2], d[3]) for d in LUKS_REMOVE_KEY_DATA)) def test_luks_remove_key(device, remove_keyfile, state, expected, monkeypatch): module = DummyModule() module.params["device"] = device module.params["remove_keyfile"] = remove_keyfile module.params["state"] = state conditions = luks_device.ConditionsHandler(module, module) try: assert conditions.luks_remove_key() == expected except ValueError: assert expected == "exception"
gpl-3.0
-7,181,695,188,217,535,000
36.373333
82
0.587941
false
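A hedged sketch of the stubbing pattern the tests above rely on: Handler._run_command is monkeypatched so CryptHandler never shells out. The UUID string is arbitrary, and the expected value assumes generate_luks_name simply prepends "luks-" to the stubbed command output, as the existing test_generate_luks_name suggests; DummyModule and luks_device come from the same test module.

def test_generate_luks_name_other_uuid(monkeypatch):
    module = DummyModule()
    # Pretend the stubbed command returned this UUID on stdout.
    monkeypatch.setattr(luks_device.Handler, "_run_command",
                        lambda x, y: [0, "1234-abcd", ""])
    crypt = luks_device.CryptHandler(module)
    assert crypt.generate_luks_name("/dev/dummy") == "luks-1234-abcd"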
vitan/django
tests/test_client_regress/urls.py
25
2463
from django.conf.urls import include, url from django.views.generic import RedirectView from . import views urlpatterns = [ url(r'', include('test_client.urls')), url(r'^no_template_view/$', views.no_template_view), url(r'^staff_only/$', views.staff_only_view), url(r'^get_view/$', views.get_view), url(r'^request_data/$', views.request_data), url(r'^request_data_extended/$', views.request_data, {'template': 'extended.html', 'data': 'bacon'}), url(r'^arg_view/(?P<name>.+)/$', views.view_with_argument, name='arg_view'), url(r'^nested_view/$', views.nested_view, name='nested_view'), url(r'^login_protected_redirect_view/$', views.login_protected_redirect_view), url(r'^redirects/$', RedirectView.as_view(url='/redirects/further/')), url(r'^redirects/further/$', RedirectView.as_view(url='/redirects/further/more/')), url(r'^redirects/further/more/$', RedirectView.as_view(url='/no_template_view/')), url(r'^redirect_to_non_existent_view/$', RedirectView.as_view(url='/non_existent_view/')), url(r'^redirect_to_non_existent_view2/$', RedirectView.as_view(url='/redirect_to_non_existent_view/')), url(r'^redirect_to_self/$', RedirectView.as_view(url='/redirect_to_self/')), url(r'^redirect_to_self_with_changing_query_view/$', views.redirect_to_self_with_changing_query_view), url(r'^circular_redirect_1/$', RedirectView.as_view(url='/circular_redirect_2/')), url(r'^circular_redirect_2/$', RedirectView.as_view(url='/circular_redirect_3/')), url(r'^circular_redirect_3/$', RedirectView.as_view(url='/circular_redirect_1/')), url(r'^redirect_other_host/$', RedirectView.as_view(url='https://otherserver:8443/no_template_view/')), url(r'^set_session/$', views.set_session_view), url(r'^check_session/$', views.check_session_view), url(r'^request_methods/$', views.request_methods_view), url(r'^check_unicode/$', views.return_unicode), url(r'^check_binary/$', views.return_undecodable_binary), url(r'^parse_unicode_json/$', views.return_json_file), url(r'^check_headers/$', views.check_headers), url(r'^check_headers_redirect/$', RedirectView.as_view(url='/check_headers/')), url(r'^body/$', views.body), url(r'^read_all/$', views.read_all), url(r'^read_buffer/$', views.read_buffer), url(r'^request_context_view/$', views.request_context_view), url(r'^render_template_multiple_times/$', views.render_template_multiple_times), ]
bsd-3-clause
1,625,592,356,221,824,300
59.073171
107
0.672757
false
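A hedged sketch of resolving two of the named routes defined above, assuming this module is the active ROOT_URLCONF as it is in the test_client_regress suite (the import path matches the Django release that still ships django.conf.urls.url).

from django.core.urlresolvers import reverse

assert reverse('arg_view', kwargs={'name': 'testclient'}) == \
    '/arg_view/testclient/'
assert reverse('nested_view') == '/nested_view/'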
diedthreetimes/VCrash
pybindgen-0.15.0.795/.waf-1.5.9-0c853694b62ef4240caa9158a9f2573d/wafadmin/Logs.py
1
2494
#! /usr/bin/env python # encoding: utf-8 import os,re,logging,traceback,sys from Constants import* zones='' verbose=0 colors_lst={'USE':True,'BOLD':'\x1b[01;1m','RED':'\x1b[01;91m','GREEN':'\x1b[32m','YELLOW':'\x1b[33m','PINK':'\x1b[35m','BLUE':'\x1b[01;34m','CYAN':'\x1b[36m','NORMAL':'\x1b[0m','cursor_on':'\x1b[?25h','cursor_off':'\x1b[?25l',} got_tty=not os.environ.get('TERM','dumb')in['dumb','emacs'] if got_tty: try: got_tty=sys.stderr.isatty() except AttributeError: got_tty=False import Utils if not got_tty or sys.platform=='win32'or'NOCOLOR'in os.environ: colors_lst['USE']=False def get_color(cl): if not colors_lst['USE']:return'' return colors_lst.get(cl,'') class foo(object): def __getattr__(self,a): return get_color(a) def __call__(self,a): return get_color(a) colors=foo() re_log=re.compile(r'(\w+): (.*)',re.M) class log_filter(logging.Filter): def __init__(self,name=None): pass def filter(self,rec): rec.c1=colors.PINK rec.c2=colors.NORMAL rec.zone=rec.module if rec.levelno>=logging.INFO: if rec.levelno>=logging.ERROR: rec.c1=colors.RED elif rec.levelno>=logging.WARNING: rec.c1=colors.YELLOW else: rec.c1=colors.GREEN return True zone='' m=re_log.match(rec.msg) if m: zone=rec.zone=m.group(1) rec.msg=m.group(2) if zones: return getattr(rec,'zone','')in zones or'*'in zones elif not verbose>2: return False return True class formatter(logging.Formatter): def __init__(self): logging.Formatter.__init__(self,LOG_FORMAT,HOUR_FORMAT) def format(self,rec): if rec.levelno>=logging.WARNING or rec.levelno==logging.INFO: try: return'%s%s%s'%(rec.c1,rec.msg.decode('utf-8'),rec.c2) except: return rec.c1+rec.msg+rec.c2 return logging.Formatter.format(self,rec) def debug(msg): if verbose: msg=msg.replace('\n',' ') logging.debug(msg) def error(msg): logging.error(msg) if verbose>1: if isinstance(msg,Utils.WafError): st=msg.stack else: st=traceback.extract_stack() if st: st=st[:-1] buf=[] for filename,lineno,name,line in st: buf.append(' File "%s", line %d, in %s'%(filename,lineno,name)) if line: buf.append(' %s'%line.strip()) if buf:logging.error("\n".join(buf)) warn=logging.warn info=logging.info def init_log(): log=logging.getLogger() log.handlers=[] log.filters=[] hdlr=logging.StreamHandler() hdlr.setFormatter(formatter()) log.addHandler(hdlr) log.addFilter(log_filter()) log.setLevel(logging.DEBUG) init_log()
gpl-2.0
-1,031,789,942,086,977,700
25.531915
228
0.668805
false
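A hedged sketch of driving the zone-filtered logging module above; it assumes the wafadmin directory is on sys.path (as it is when waf runs), since the module is imported simply as Logs. Zone names used here are illustrative.

import Logs

Logs.verbose = 1            # debug() only emits when verbose is truthy
Logs.zones = ['deps']       # log_filter passes only these zones (or '*')
Logs.debug('deps: rescanning implicit dependencies')    # shown
Logs.debug('runner: command output suppressed')         # filtered out
Logs.warn('warnings and errors are always shown, colored on a tty')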
PrairieLearn/PrairieLearn
exampleCourse/elements/clickable-image/clickable-image.py
1
3105
import chevron import lxml.html import prairielearn as pl import random def prepare(element_html, data): element = lxml.html.fragment_fromstring(element_html) pl.check_attribs(element, required_attribs=['answers-name'], optional_attribs=[]) name = pl.get_string_attrib(element, 'answers-name') number = random.randint(0,9) data['params'][name] = number data['correct_answers'][name] = number def render(element_html, data): # Grab the name of the element (name of the hidden input tag), and generate a unique UUID # Each element on the page has its own UUID to prevent the JavaScript of other elements from interfering element = lxml.html.fragment_fromstring(element_html) name = pl.get_string_attrib(element, 'answers-name') uuid = pl.get_uuid() if data['panel'] == 'question': html_params = { 'question': True, 'number': data['params'][name], 'answers_name': name, 'image_url': data['options']['client_files_element_url'] + '/block_i.png', 'uuid': uuid } elif data['panel'] == 'submission': feedback = data['partial_scores'][name].get('feedback', None) html_params = { 'submission': True, 'submitted': data["raw_submitted_answers"][name], 'feedback': feedback } elif data['panel'] == 'answer': html_params = { 'answer': True, 'correct': data["correct_answers"][name] } with open('clickable-image.mustache', 'r') as f: return chevron.render(f, html_params).strip() def parse(element_html, data): element = lxml.html.fragment_fromstring(element_html) name = pl.get_string_attrib(element, 'answers-name') # Grab the number of clicks in the hidden field and put it into submitted answers # Each "input" field is automatically saved into "raw_submitted_answers" data["submitted_answers"][name] = int(data["raw_submitted_answers"][name]) def grade(element_html, data): # Get the name of the element and the weight for this answer element = lxml.html.fragment_fromstring(element_html) name = pl.get_string_attrib(element, 'answers-name') weight = pl.get_float_attrib(element, 'weight', 1.0) # Get the number of submitted clicks and the correct number of clicks submitted_answer = data["submitted_answers"][name] correct_answer = data["correct_answers"][name] score = 0.0 feedback = None # Grade the actual number of clicks if submitted_answer == correct_answer: score = 1.0 elif submitted_answer == correct_answer - 1: score = 0.75 feedback = 'Your number was one too small.' elif submitted_answer == correct_answer + 1: score = 0.5 feedback = 'Your number was one too large.' else: score = 0 feedback = "You didn't click on the image the correct number of times" # Put the score, weight, and feedback into the data object data['partial_scores'][name] = {'score': score, 'weight': weight, 'feedback': feedback}
agpl-3.0
-8,148,477,353,597,223,000
35.964286
108
0.639291
false
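The grade() function in the record above hands out partial credit around the target click count: full marks for an exact match, 0.75 when one click short, 0.5 when one click over, otherwise zero. The helper below merely restates that scoring rule as a standalone function so it can be exercised without the PrairieLearn runtime; score_clicks is an invented name and is not part of the element API.

def score_clicks(submitted, correct):
    """Partial-credit rule used by the element's grade() above, restated."""
    if submitted == correct:
        return 1.0, None
    if submitted == correct - 1:
        return 0.75, 'Your number was one too small.'
    if submitted == correct + 1:
        return 0.5, 'Your number was one too large.'
    return 0.0, "You didn't click on the image the correct number of times"

if __name__ == '__main__':
    for clicks in (5, 4, 6, 9):        # correct answer assumed to be 5
        score, feedback = score_clicks(clicks, 5)
        print(clicks, score, feedback)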
SepehrMN/nest-simulator
pynest/nest/tests/test_stdp_multiplicity.py
15
9321
# -*- coding: utf-8 -*- # # test_stdp_multiplicity.py # # This file is part of NEST. # # Copyright (C) 2004 The NEST Initiative # # NEST is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # NEST is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with NEST. If not, see <http://www.gnu.org/licenses/>. # This script tests the parrot_neuron in NEST. import nest import unittest import math import numpy as np @nest.ll_api.check_stack class StdpSpikeMultiplicity(unittest.TestCase): """ Test correct handling of spike multiplicity in STDP. This test originated from work on issue #77. Concerning the definition of STDP for multiplicity > 1, consider the following (see also @flinz on #82): A plastic synapse is created between two parrot neurons. As reference case, we use two precise spiking parrot neurons driven by spike trains n*h - k*delta with delta < h, and 1 < k < K such that K delta < h, i.e., we get K spikes in each time step (h is resolution). If we transmit the same spike trains via plain parrot neurons, they will be handled as spikes with multiplicity K. Then, in the limit delta -> 0, the weight changes observed in the synapse between the precise parrots shall converge to the weight changes observed between the plain parrots. We test the resulting weights as follows: 1. Weights obtained using parrot_neuron must be identical independent of delta, since in this case all spikes are at the end of the step, i.e., all spikes have identical times independent of delta. 2. We choose delta values that are decrease by factors of 2. The plasticity rules depend on spike-time differences through exp(dT / tau) where dT is the time between pre- and postsynaptic spikes. We construct pre- and postsynaptic spike times so that dT = pre_post_shift + m * delta with m * delta < resolution << pre_post_shift. The time-dependence of the plasticity rule is therefore to good approximation linear in delta. We can thus test as follows: Let w_pl be the weight obtained with the plain parrot, and w_ps_j the weight obtained with the precise parrot for delta_j = delta0 / 2^j. Then, ( w_ps_{j+1} - w_pl ) / ( w_ps_j - w_pl ) ~ 0.5 for all j i.e., the difference between plain and precise weights halves each time delta is halved. """ def run_protocol(self, pre_post_shift): """ Create network and simulate for each delta value. Returns a dict with the synaptic weight at end of simulation for plain and precise parrots, one weight per delta value. All values for the plain parrot case should be identical, and the values for the precise parrot case should converge to that value for delta -> 0. All delta values must fulfill multiplicity * delta < resolution / 2 so that in the plain case off-grid spike times are rounded up to the end of the step and thus belong to the same step as the corresponding precise spikes. :param pre_post_shift: Delay between pre- and postsynaptic trains :returns: {'parrot': [<weights>], 'parrot_ps': [<weights>]} """ multiplicity = 2**3 resolution = 2.**-4 tics_per_ms = 1. / resolution * multiplicity * 4 deltas = [resolution / multiplicity / 2**m for m in range(2, 10)] delay = 1. 
# k spikes will be emitted at these two times pre_spike_times_base = [100., 200.] nest.set_verbosity("M_WARNING") post_weights = {'parrot': [], 'parrot_ps': []} for delta in deltas: assert multiplicity * delta < resolution / 2., "Test inconsistent." nest.ResetKernel() nest.SetKernelStatus({'tics_per_ms': tics_per_ms, 'resolution': resolution}) pre_times = sorted(t_base - k * delta for t_base in pre_spike_times_base for k in range(multiplicity)) post_times = [pre_time + pre_post_shift for pre_time in pre_times] # create spike_generators with these times pre_sg = nest.Create("spike_generator", params={"spike_times": pre_times, 'allow_offgrid_times': True}) post_sg = nest.Create("spike_generator", params={"spike_times": post_times, 'allow_offgrid_times': True}) pre_sg_ps = nest.Create("spike_generator", params={"spike_times": pre_times, 'precise_times': True}) post_sg_ps = nest.Create("spike_generator", params={"spike_times": post_times, 'precise_times': True}) # create parrot neurons and connect spike_generators pre_parrot = nest.Create("parrot_neuron") post_parrot = nest.Create("parrot_neuron") pre_parrot_ps = nest.Create("parrot_neuron_ps") post_parrot_ps = nest.Create("parrot_neuron_ps") nest.Connect(pre_sg, pre_parrot, syn_spec={"delay": delay}) nest.Connect(post_sg, post_parrot, syn_spec={"delay": delay}) nest.Connect(pre_sg_ps, pre_parrot_ps, syn_spec={"delay": delay}) nest.Connect(post_sg_ps, post_parrot_ps, syn_spec={"delay": delay}) # create spike recorder --- debugging only spikes = nest.Create("spike_recorder") nest.Connect( pre_parrot + post_parrot + pre_parrot_ps + post_parrot_ps, spikes ) # connect both parrot neurons with a stdp synapse onto port 1 # thereby spikes transmitted through the stdp connection are # not repeated postsynaptically. nest.Connect( pre_parrot, post_parrot, syn_spec={'synapse_model': 'stdp_synapse', 'receptor_type': 1}) nest.Connect( pre_parrot_ps, post_parrot_ps, syn_spec={'synapse_model': 'stdp_synapse', 'receptor_type': 1}) # get STDP synapse and weight before protocol syn = nest.GetConnections(source=pre_parrot, synapse_model="stdp_synapse") w_pre = syn.get('weight') syn_ps = nest.GetConnections(source=pre_parrot_ps, synapse_model="stdp_synapse") w_pre_ps = syn_ps.get('weight') sim_time = max(pre_times + post_times) + 5 * delay nest.Simulate(sim_time) # get weight post protocol w_post = syn.get('weight') w_post_ps = syn_ps.get('weight') assert w_post != w_pre, "Plain parrot weight did not change." assert w_post_ps != w_pre_ps, "Precise parrot \ weight did not change." post_weights['parrot'].append(w_post) post_weights['parrot_ps'].append(w_post_ps) return post_weights def test_ParrotNeuronSTDPProtocolPotentiation(self): """Check weight convergence on potentiation.""" post_weights = self.run_protocol(pre_post_shift=10.0) w_plain = np.array(post_weights['parrot']) w_precise = np.array(post_weights['parrot_ps']) assert all(w_plain == w_plain[0]), 'Plain weights differ' dw = w_precise - w_plain dwrel = dw[1:] / dw[:-1] assert all(np.round(dwrel, decimals=3) == 0.5), 'Precise weights do not converge.' def test_ParrotNeuronSTDPProtocolDepression(self): """Check weight convergence on depression.""" post_weights = self.run_protocol(pre_post_shift=-10.0) w_plain = np.array(post_weights['parrot']) w_precise = np.array(post_weights['parrot_ps']) assert all(w_plain == w_plain[0]), 'Plain weights differ' dw = w_precise - w_plain dwrel = dw[1:] / dw[:-1] assert all(np.round(dwrel, decimals=3) == 0.5), 'Precise weights do not converge.' 
def suite(): # makeSuite is sort of obsolete http://bugs.python.org/issue2721 # using loadTestsFromTestCase instead. suite = unittest.TestLoader().loadTestsFromTestCase(StdpSpikeMultiplicity) return unittest.TestSuite([suite]) def run(): runner = unittest.TextTestRunner(verbosity=2) runner.run(suite()) if __name__ == "__main__": run()
gpl-2.0
-3,635,231,856,413,262,300
37.8375
79
0.589744
false
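The docstring of the test above derives its acceptance criterion from the claim that ( w_ps_{j+1} - w_pl ) / ( w_ps_j - w_pl ) ≈ 0.5 whenever delta is halved. The snippet below exercises exactly that ratio computation on synthetic weights, so it runs without NEST; the numbers are illustrative and are not simulation output.

import numpy as np

w_plain = 1.05                                   # illustrative plain-parrot weight
# Synthetic "precise" weights whose distance to w_plain halves at every step,
# mimicking the expected behaviour for delta -> delta/2 -> delta/4 ...
w_precise = w_plain + 0.08 * 0.5 ** np.arange(6)

dw = w_precise - w_plain
dwrel = dw[1:] / dw[:-1]                         # same ratio the test computes

assert np.all(np.round(dwrel, decimals=3) == 0.5), 'convergence criterion violated'
print(dwrel)                                     # -> [0.5 0.5 0.5 0.5 0.5]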
Crespo911/pyspace
pySPACE/run/gui/node_chain_GUI.py
4
48976
""" Graphical user interface to create a complete processing flow This version uses PyQt4 and YAML, and was developed with Python 2.6.5 When started the GUI is checking for all existing nodes. It lists them and the corresponding parameters. Comments can also be inserted. They will appear at the beginning of the output file, which has the YAML format. :Author: Sirko Straube ([email protected]) :Created: 2010/06 .. todo:: There are still some things on the ToDo-list that I did not manage yet: - improve the help system/documentation (probably with search function) - to define "user defined" nodes (this is often used within the framework) - display default values for parameters - connect enter key with setParam Feel free to edit! """ import sys, yaml, copy from PyQt4 import QtCore, QtGui import os file_path = os.path.dirname(os.path.abspath(__file__)) pyspace_path = file_path[:file_path.rfind('pySPACE')-1] if not pyspace_path in sys.path: sys.path.append(pyspace_path) import pySPACE #automatically generate all nodes... import pySPACE.missions.nodes class ConfDialog(object): """ Central class of the gui Here all nodes are listed, the GUI is established and all necessary functions (within or behind the GUI) are defined. Therefore an instance of this class is a complete, fully functioning GUI. The object is created in the class ConfiguratorDialog (see below). """ def __init__(self, flow_directory): self.flow_directory = flow_directory raw_nodelist= pySPACE.missions.nodes.DEFAULT_NODE_MAPPING #create dict of nodes self.nodelist=[] self.types=['all'] for node in raw_nodelist: #desired name is the name of each entry self.nodelist.append(node) self.nodelist.sort() for line in self.nodelist: #getting all possible node types for type box current_type, valid=self.get_node_type(line) if not current_type in self.types and valid: self.types.append(current_type) self.types.sort() def setupUi(self, Dialog): """ This function does all the graphical stuff The output is mainly modified from raw code created with QTDesigner. Here just the layout of the GUI is defined. The whole arrangement is mainly controlled by horizontal and vertical layouts objects and a grid layout for the whole window. 
""" #setting up Dialog Dialog.setObjectName("Dialog") Dialog.setEnabled(True) Dialog.resize(780, 485) sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth(Dialog.sizePolicy().hasHeightForWidth()) Dialog.setSizePolicy(sizePolicy) Dialog.setMouseTracking(False) Dialog.setAcceptDrops(False) #Dialog.setSizeGripEnabled(True) Dialog.setWindowTitle(QtGui.QApplication.translate("Dialog", "Configurator", None, QtGui.QApplication.UnicodeUTF8)) #left side of dialog containing node selection, corresponding parameters and values self.verticalLayout = QtGui.QVBoxLayout() self.verticalLayout.setObjectName("verticalLayout") self.hLayoutNodes = QtGui.QHBoxLayout() self.hLayoutNodes.setObjectName("hLayoutNodes") self.SelectedNodeTXT = QtGui.QLabel(Dialog) self.SelectedNodeTXT.setObjectName("SelectedNodeTXT") self.hLayoutNodes.addWidget(self.SelectedNodeTXT) self.SelectedNodeTXT.setText(QtGui.QApplication.translate("Dialog", "Selected Node", None, QtGui.QApplication.UnicodeUTF8)) spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum) self.hLayoutNodes.addItem(spacerItem) self.TypeNodeBox = QtGui.QComboBox(Dialog) self.TypeNodeBox.setObjectName("TypeNodeBox") self.hLayoutNodes.addWidget(self.TypeNodeBox) self.verticalLayout.addLayout(self.hLayoutNodes) self.SelectNodeBox = QtGui.QComboBox(Dialog) self.SelectNodeBox.setObjectName("SelectNodeBox") self.verticalLayout.addWidget(self.SelectNodeBox) self.horizontalLayout_3 = QtGui.QHBoxLayout() self.horizontalLayout_3.setObjectName("horizontalLayout_3") self.PriorityLabel = QtGui.QLabel(Dialog) self.PriorityLabel.setObjectName("PriorityLabel") self.PriorityLabel.setText(QtGui.QApplication.translate("Dialog", "Priority", None, QtGui.QApplication.UnicodeUTF8)) self.horizontalLayout_3.addWidget(self.PriorityLabel) self.PriorityLineEdit = QtGui.QLineEdit(Dialog) sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth(self.PriorityLineEdit.sizePolicy().hasHeightForWidth()) self.PriorityLineEdit.setSizePolicy(sizePolicy) self.PriorityLineEdit.setMaximumSize(QtCore.QSize(30, 22)) self.PriorityLineEdit.setAlignment(QtCore.Qt.AlignCenter | QtCore.Qt.AlignCenter) self.PriorityLineEdit.setObjectName("PriorityLineEdit") self.horizontalLayout_3.addWidget(self.PriorityLineEdit) self.NodeActive = QtGui.QCheckBox(Dialog) self.NodeActive.setObjectName("NodeActive") self.NodeActive.setChecked(True) self.NodeActive.setText(QtGui.QApplication.translate("Dialog", "is active", None, QtGui.QApplication.UnicodeUTF8)) self.horizontalLayout_3.addWidget(self.NodeActive) spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum) self.horizontalLayout_3.addItem(spacerItem) self.verticalLayout.addLayout(self.horizontalLayout_3) self.HelpButton = QtGui.QPushButton(Dialog) self.HelpButton.setObjectName("HelpButton") self.HelpButton.setText(QtGui.QApplication.translate("Dialog", " ? 
", None, QtGui.QApplication.UnicodeUTF8)) self.horizontalLayout_3.addWidget(self.HelpButton) self.horizontalLayout_7 = QtGui.QHBoxLayout() self.horizontalLayout_7.setObjectName("horizontalLayout_7") self.InsertNodeButton = QtGui.QPushButton(Dialog) self.InsertNodeButton.setObjectName("InsertNodeButton") self.InsertNodeButton.setText(QtGui.QApplication.translate("Dialog", "Insert Node", None, QtGui.QApplication.UnicodeUTF8)) self.horizontalLayout_7.addWidget(self.InsertNodeButton) self.DeleteNodeButton = QtGui.QPushButton(Dialog) self.DeleteNodeButton.setObjectName("DeleteNodeButton") self.DeleteNodeButton.setText(QtGui.QApplication.translate("Dialog", "Delete Node", None, QtGui.QApplication.UnicodeUTF8)) self.horizontalLayout_7.addWidget(self.DeleteNodeButton) spacerItem4 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum) self.horizontalLayout_7.addItem(spacerItem4) self.verticalLayout.addLayout(self.horizontalLayout_7) self.line = QtGui.QFrame(Dialog) self.line.setLineWidth(5) self.line.setFrameShape(QtGui.QFrame.HLine) self.line.setFrameShadow(QtGui.QFrame.Sunken) self.line.setObjectName("line") self.verticalLayout.addWidget(self.line) self.AvailParamsTXT = QtGui.QLabel(Dialog) self.AvailParamsTXT.setObjectName("AvailParamsTXT") self.verticalLayout.addWidget(self.AvailParamsTXT) self.AvailParamsTXT.setText(QtGui.QApplication.translate("Dialog", "Available Parameters", None, QtGui.QApplication.UnicodeUTF8)) self.ParamBox = QtGui.QComboBox(Dialog) self.ParamBox.setEditable(False) self.ParamBox.setObjectName("ParamBox") self.verticalLayout.addWidget(self.ParamBox) self.ParamValueTXT = QtGui.QLabel(Dialog) self.ParamValueTXT.setObjectName("ParamValueTXT") self.verticalLayout.addWidget(self.ParamValueTXT) self.ParamValueTXT.setText(QtGui.QApplication.translate("Dialog", "Value", None, QtGui.QApplication.UnicodeUTF8)) self.ParamValue = QtGui.QLineEdit(Dialog) self.ParamValue.setObjectName("ParamValue") self.verticalLayout.addWidget(self.ParamValue) self.horizontalLayout_8 = QtGui.QHBoxLayout() self.horizontalLayout_8.setObjectName("horizontalLayout_8") self.SetParamButton = QtGui.QPushButton(Dialog) self.SetParamButton.setObjectName("SetParamButton") self.SetParamButton.setText(QtGui.QApplication.translate("Dialog", "Set", None, QtGui.QApplication.UnicodeUTF8)) self.horizontalLayout_8.addWidget(self.SetParamButton) self.DefaultParamButton = QtGui.QPushButton(Dialog) self.DefaultParamButton.setObjectName("DefaultParamButton") self.DefaultParamButton.setText(QtGui.QApplication.translate("Dialog", "Default", None, QtGui.QApplication.UnicodeUTF8)) self.horizontalLayout_8.addWidget(self.DefaultParamButton) spacerItem5 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum) self.horizontalLayout_8.addItem(spacerItem5) self.verticalLayout.addLayout(self.horizontalLayout_8) self.line2 = QtGui.QFrame(Dialog) self.line2.setLineWidth(5) self.line2.setFrameShape(QtGui.QFrame.HLine) self.line2.setFrameShadow(QtGui.QFrame.Sunken) self.line2.setObjectName("line2") self.verticalLayout.addWidget(self.line2) self.Notes = QtGui.QTextEdit(Dialog) self.Notes.setObjectName("Notes") self.Notes.setEnabled(True) self.Notes.setReadOnly(True) self.verticalLayout.addWidget(self.Notes) self.Notes.setText("Notes: ") #some style changes self.Notes.setFrameStyle(QtGui.QFrame.NoFrame) notes_palette=self.Notes.palette() notes_palette.setColor(notes_palette.currentColorGroup(),notes_palette.Base, Dialog.palette().background().color()) 
self.Notes.setPalette(notes_palette) spacerItem = QtGui.QSpacerItem(18, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding) self.verticalLayout.addItem(spacerItem) #right side of dialog containing name of configuration file and parameter list self.verticalLayout_2 = QtGui.QVBoxLayout() self.verticalLayout_2.setObjectName("verticalLayout_2") self.FileTXT = QtGui.QLabel(Dialog) self.FileTXT.setObjectName("FileTXT") self.verticalLayout_2.addWidget(self.FileTXT) self.FileTXT.setText(QtGui.QApplication.translate("Dialog", "Selected Configuration File", None, QtGui.QApplication.UnicodeUTF8)) self.horizontalLayout_2 = QtGui.QHBoxLayout() self.horizontalLayout_2.setObjectName("horizontalLayout_2") self.filename="untitled.yaml" self.FileEdit = QtGui.QLineEdit(Dialog) self.FileEdit.setText(self.filename) self.FileEdit.setObjectName("FileEdit") self.horizontalLayout_2.addWidget(self.FileEdit) self.BrowseButton = QtGui.QPushButton(Dialog) self.BrowseButton.setObjectName("BrowseButton") self.horizontalLayout_2.addWidget(self.BrowseButton) self.BrowseButton.setText(QtGui.QApplication.translate("Dialog", "Browse", None, QtGui.QApplication.UnicodeUTF8)) self.verticalLayout_2.addLayout(self.horizontalLayout_2) self.hLayoutSpecs = QtGui.QHBoxLayout() self.hLayoutSpecs.setObjectName("hLayoutSpecs") self.SpecsLabel = QtGui.QLabel(Dialog) self.SpecsLabel.setObjectName("SpecsLabel") self.SpecsLabel.setText(QtGui.QApplication.translate("Dialog", "Specifications", None, QtGui.QApplication.UnicodeUTF8)) self.hLayoutSpecs.addWidget(self.SpecsLabel) spacerItemSpecs = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum) self.hLayoutSpecs.addItem(spacerItemSpecs) self.EditCommentButton = QtGui.QPushButton(Dialog) self.EditCommentButton.setObjectName("EditCommentButton") self.EditCommentButton.setText(QtGui.QApplication.translate("Dialog", "Edit Comment", None, QtGui.QApplication.UnicodeUTF8)) self.hLayoutSpecs.addWidget(self.EditCommentButton) self.comment='' self.verticalLayout_2.addLayout(self.hLayoutSpecs) self.SpecsBox = QtGui.QTextEdit(Dialog) self.SpecsBox.setEnabled(True) self.SpecsBox.setReadOnly(True) self.SpecsBox.setObjectName("SpecsBox") self.verticalLayout_2.addWidget(self.SpecsBox) self.specs=[] #no specifications at start #...and the Save, Append and Close Buttons self.horizontalLayout = QtGui.QHBoxLayout() self.horizontalLayout.setObjectName("horizontalLayout") spacerItem3 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum) self.horizontalLayout.addItem(spacerItem3) self.LoadButton = QtGui.QPushButton(Dialog) self.LoadButton.setObjectName("LoadButton") self.LoadButton.setText(QtGui.QApplication.translate("Dialog", "Load", None, QtGui.QApplication.UnicodeUTF8)) self.horizontalLayout.addWidget(self.LoadButton) self.SaveButton = QtGui.QPushButton(Dialog) self.SaveButton.setObjectName("SaveButton") self.SaveButton.setText(QtGui.QApplication.translate("Dialog", "Save", None, QtGui.QApplication.UnicodeUTF8)) self.horizontalLayout.addWidget(self.SaveButton) self.CloseButton = QtGui.QPushButton(Dialog) self.CloseButton.setObjectName("CloseButton") self.CloseButton.setText(QtGui.QApplication.translate("Dialog", "Close", None, QtGui.QApplication.UnicodeUTF8)) self.horizontalLayout.addWidget(self.CloseButton) #setting Order of Tabulator Key Dialog.setTabOrder(self.FileEdit, self.BrowseButton) Dialog.setTabOrder(self.BrowseButton, self.SelectNodeBox) Dialog.setTabOrder(self.SelectNodeBox, self.ParamBox) Dialog.setTabOrder(self.ParamBox, 
self.ParamValue) Dialog.setTabOrder(self.ParamValue, self.SaveButton) Dialog.setTabOrder(self.SaveButton, self.InsertNodeButton) Dialog.setTabOrder(self.InsertNodeButton, self.CloseButton) #Finally, some further layout operations self.gridLayout = QtGui.QGridLayout(Dialog) self.gridLayout.setObjectName("gridLayout") self.horizontalLayout_3 = QtGui.QHBoxLayout() self.horizontalLayout_3.setObjectName("horizontalLayout_3") self.horizontalLayout_3.addLayout(self.verticalLayout) spacerItem1 = QtGui.QSpacerItem(60, 20, QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Minimum) self.horizontalLayout_3.addItem(spacerItem1) self.gridLayout.addLayout(self.horizontalLayout_3, 0, 0, 1, 1) self.gridLayout.addLayout(self.horizontalLayout, 2, 1, 1, 1) self.gridLayout.addLayout(self.verticalLayout_2, 0, 1, 2, 1) #two more dialogues, probably needed self.init_CommentUi() self.init_HelpUi() #formatting Dialog, setting connections, and linking Node and Parameter Box self.TypeNodeBox.addItems(self.types) self.TypeNodeBox.setCurrentIndex(self.TypeNodeBox.findText('all',QtCore.Qt.MatchExactly)) self.formatNodeBox() self.formatParamBox() self.resetNoteConnections() self.makeConnections() ################################################################## ##The following functions deal with the Comment and Help dialog def init_CommentUi(self): """setting up layout and connections of the comment dialog""" self.Comment_Dial=QtGui.QDialog() self.Comment_Dial.setObjectName("Comment") self.Comment_Dial.resize(362, 297) self.Comment_Dial.gridLayout = QtGui.QGridLayout(self.Comment_Dial) self.Comment_Dial.gridLayout.setObjectName("gridLayout") self.Comment_Dial.textBrowser = QtGui.QTextEdit(self.Comment_Dial) self.Comment_Dial.textBrowser.setObjectName("textBrowser") self.Comment_Dial.gridLayout.addWidget(self.Comment_Dial.textBrowser, 0, 0, 1, 1) self.Comment_Dial.horizontalLayout = QtGui.QHBoxLayout() self.Comment_Dial.horizontalLayout.setObjectName("horizontalLayout") spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum) self.Comment_Dial.horizontalLayout.addItem(spacerItem) self.Comment_Dial.OKButton = QtGui.QPushButton(self.Comment_Dial) self.Comment_Dial.OKButton.setObjectName("OKButton") self.Comment_Dial.OKButton.setText(QtGui.QApplication.translate("Comment", "OK", None, QtGui.QApplication.UnicodeUTF8)) self.Comment_Dial.horizontalLayout.addWidget(self.Comment_Dial.OKButton) self.Comment_Dial.AbortButton = QtGui.QPushButton(self.Comment_Dial) self.Comment_Dial.AbortButton.setObjectName("AbortButton") self.Comment_Dial.AbortButton.setText(QtGui.QApplication.translate("Comment", "Abort", None, QtGui.QApplication.UnicodeUTF8)) self.Comment_Dial.horizontalLayout.addWidget(self.Comment_Dial.AbortButton) self.Comment_Dial.gridLayout.addLayout(self.Comment_Dial.horizontalLayout, 1, 0, 1, 1) self.Comment_Dial.setWindowTitle(QtGui.QApplication.translate("Comment", "Comment", None, QtGui.QApplication.UnicodeUTF8)) #Connections QtCore.QObject.connect(self.Comment_Dial.OKButton, QtCore.SIGNAL("clicked()"), self.update_comment) QtCore.QObject.connect(self.Comment_Dial.OKButton, QtCore.SIGNAL("clicked()"), self.Comment_Dial, QtCore.SLOT("close()")) QtCore.QObject.connect(self.Comment_Dial.AbortButton, QtCore.SIGNAL("clicked()"), self.Comment_Dial, QtCore.SLOT("close()")) def show_CommentUi(self): """shows and updates comment ui""" if self.comment: self.Comment_Dial.textBrowser.setText(self.comment) self.Comment_Dial.show() def update_comment(self): """writes content of commentUI-window into 
comment variable and modifies appropriately with # symbol (if necessary)""" comment=self.Comment_Dial.textBrowser.toPlainText() if not comment.startsWith('#') and not comment.isspace(): comment.insert(0,'# ') pos=1 while comment.indexOf('\n', pos)>=0: nextline=comment.indexOf('\n', pos)+1 if nextline<len(comment) and not comment[nextline] == '#': comment.insert(nextline,'#') pos=nextline self.comment=comment def init_HelpUi(self): """setting up layout and connections of the help dialog""" self.Help_Dial=QtGui.QDialog() self.Help_Dial.setObjectName("Help") self.Help_Dial.resize(700, 400) self.Help_Dial.gridLayout = QtGui.QGridLayout(self.Help_Dial) self.Help_Dial.gridLayout.setObjectName("gridLayout") self.Help_Dial.textBrowser = QtGui.QTextEdit(self.Help_Dial) self.Help_Dial.textBrowser.setObjectName("textBrowser") self.Help_Dial.textBrowser.setEnabled(True) self.Help_Dial.textBrowser.setReadOnly(True) self.Help_Dial.gridLayout.addWidget(self.Help_Dial.textBrowser, 0, 0, 1, 1) self.Help_Dial.horizontalLayout = QtGui.QHBoxLayout() self.Help_Dial.horizontalLayout.setObjectName("horizontalLayout") spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum) self.Help_Dial.horizontalLayout.addItem(spacerItem) self.Help_Dial.CloseButton = QtGui.QPushButton(self.Help_Dial) self.Help_Dial.CloseButton.setObjectName("CloseButton") self.Help_Dial.CloseButton.setText(QtGui.QApplication.translate("Help", "Close", None, QtGui.QApplication.UnicodeUTF8)) self.Help_Dial.horizontalLayout.addWidget(self.Help_Dial.CloseButton) self.Help_Dial.gridLayout.addLayout(self.Help_Dial.horizontalLayout, 1, 0, 1, 1) self.Help_Dial.setWindowTitle(QtGui.QApplication.translate("Help", "Help", None, QtGui.QApplication.UnicodeUTF8)) #Connections QtCore.QObject.connect(self.Help_Dial.CloseButton, QtCore.SIGNAL("clicked()"), self.Help_Dial, QtCore.SLOT("close()")) def show_HelpUi(self): """shows and updates help ui""" self.update_help() self.Help_Dial.show() def update_help(self): """updates text in help ui according to current node""" currentnode=str(self.SelectNodeBox.currentText()) if currentnode: self.Help_Dial.textBrowser.setText( pySPACE.missions.nodes.NODE_MAPPING[currentnode].__doc__) ################################################################## ############## internal formatting and helper functions def get_node_type(self, node_str): """ Parse node string for the node type This function assigns the node string (2nd argument) to a type which is used for the type sorting in the GUI. The function takes the whole NODE_MAPPING string: The type is the string between "pySPACE.missions.nodes." and the next "." and corresponds to the directory in pySPACE/missions/nodes/ :returns: node type string and a boolean reflecting the success of the assignment """ current_node_str = str(pySPACE.missions.nodes.NODE_MAPPING[node_str]) ref_str = 'pySPACE.missions.nodes.' 
startpos = current_node_str.find(ref_str)+len(ref_str) endpos = current_node_str.find('.',startpos) current_type = current_node_str[startpos:endpos] if 0 <= startpos < endpos: valid = True else: valid = False # the string of the current type is returned and a boolean # if the assignment worked return current_type, valid def insertImage(self, icon, filename): """insert image into label""" pixmap = QtGui.QPixmap() pixmap.load(filename) icon.setPixmap(pixmap.scaledToWidth(icon.width(), QtCore.Qt.SmoothTransformation)) def formatNodeBox(self): """Format NodeBox according to type selection in TypeNodeBox""" selected_type=str(self.TypeNodeBox.currentText()) self.SelectNodeBox.clear() if selected_type == 'all': self.SelectNodeBox.addItems(self.nodelist) else: temp_nodelist=[] for line in self.nodelist: current_type, valid=self.get_node_type(line) if current_type == selected_type: temp_nodelist.append(line) temp_nodelist.sort() self.SelectNodeBox.addItems(temp_nodelist) def formatParamBox(self): """Format ParamBox according to node selection in NodeBox. The parameter-list is derived from the parameters of the __init__ function of the current node. The parameters "self" and "args" are omitted, "kwargs" is translated into "user defined" """ currentnode=str(self.SelectNodeBox.currentText()) if currentnode: self.ParamBox.clear() parameters=list(pySPACE.missions.nodes.NODE_MAPPING[currentnode].__init__.im_func.func_code.co_varnames) if 'args' in parameters: parameters.pop(parameters.index('args')) if 'kwargs' in parameters: parameters.pop(parameters.index('kwargs')) parameters.append('user defined') if 'self' in parameters: parameters.pop(parameters.index('self')) parameters.sort() self.ParamBox.addItems(parameters) def validateNodesandParams(self, specs): """This function checks the current specification for possible unspecified nodes. This is necessary when e.g. a file is loaded. The file is ignored, if a non-existing node is identified. When a parameter is not present a warning is printed in the notes section, since user defined parameters are allowed. """ for specsnode in specs: if specsnode['node'] in self.nodelist: #checking parameters if 'parameters' in specsnode: parameters=list( pySPACE.missions.nodes.NODE_MAPPING[specsnode['node']].__init__.im_func.func_code.co_varnames) for param in specsnode['parameters']: if not param in parameters: self.note('Warning: File contains unspecified node parameter:Node <%s> has no default parameter <%s>!' % (specsnode['node'], param)) return True else: self.note('File contains unspecified node: <%s> not existent! File ignored.' % specsnode['node']) return False return True def eval_user_defined(self): """Evaluate a user defined parameter, so that it can be managed properly. This function is executed whenever a user defined parameter is set. It decomposes the entry into <paramname>:<paramval> and returns both. If the format is wrong, nothing happens and a message is printed in the notes section. """ userdef=str(self.ParamValue.text()) separator=userdef.find(':') if separator>0: current_param=userdef[0:separator] current_value=userdef[separator+1:] else: current_param='' current_value='' self.note("Wrong input format: Use \'param:value\'!") return current_param, current_value def get_param_value(self): """Get the value of the current parameter (if given) in order to display it in the corresponding TextEdit (ParamValue). If a user defined parameter is selected, the function looks for undefined parameters and displays <name>:<value> instead of <value>. 
""" if not self.PriorityLineEdit.text().isEmpty() and self.specs: #only if priority is specified currentnode=str(self.SelectNodeBox.currentText()) selected_param=str(self.ParamBox.currentText()) current_value='' current_pos=int(self.PriorityLineEdit.text())-1 #position=priority-1 if selected_param == 'user defined': paramlist=[] for current_param in range(self.ParamBox.count()): #create a list of the parameters currently available paramlist.append(str(self.ParamBox.itemText(current_param))) avail_params=self.specs[current_pos]['parameters'].keys() #take all parameters specified for current node for line in avail_params: if line not in paramlist: selected_param=line #found first user defined parameter, when specified parameter is not in paramlist current_value=line + ':' #value now contains "<name>:" - the value is added later break if current_pos<=len(self.specs) \ and currentnode in self.specs[current_pos]['node'] \ and 'parameters' in self.specs[current_pos] \ and selected_param in self.specs[current_pos]['parameters']: #node and parameter exist current_value=current_value + str(self.specs[current_pos]['parameters'][selected_param]) #add current value self.ParamValue.setText(current_value) else: #parameter not specified => empty field self.ParamValue.setText('') else:#parameter not specified => empty field self.ParamValue.setText('') def find_inactive_nodes(self, rawdata): """ This function takes raw YAML data (i.e. text not loaded with YAML function) and looks for commented nodes. The index of all of these nodes is returned, so that these nodes later get the value False for node_is_active. """ pos=0 nnodes=rawdata.count('node: ') #nodes are detected with string matching inactive_nodes=[] for nodenr in range(nnodes): nextnode=rawdata.find('node: ', pos) #next node position is also detected with string matching if nextnode and '#' in rawdata[rawdata.rfind('\n',0,nextnode):nextnode]: #if node is a comment inactive_nodes.append(nodenr) pos=nextnode+1 return inactive_nodes def deactivate_nodes(self): """Function that (i) uses yaml.dump to format dict according to YAML specifications and (ii) deactivates nodes by adding the comment symbol "#". Here, a deepcopy is used, because the specs dict is changed (i.e. the entry node_is_active is deleted). The function checks and dumps one node after the other: Due to a difference in formatting (by YAML) when dumping one in contrast to more than one node, additional spaces are also added here. Return value is the complete specification text in YAML format. 
""" localspecs=copy.deepcopy(self.specs) rawdata="" inactive_node=[] for line in localspecs: node_is_active=line['node_is_active'] #store boolean value del line['node_is_active'] #delete entry tempdata=yaml.dump(line) #data of current node tempdata='- ' + tempdata #change YAML single format to format for multiple entries tempdata=tempdata.replace('\n','\n ') #change YAML single format to format for multiple entries if tempdata[-2:] == ' ': tempdata=tempdata[0:-2] #remove possible spaces if not node_is_active: #add comment symbol, if necessary tempdata='#' + tempdata tempdata=tempdata.replace('\n','\n#') if tempdata.endswith('#'): tempdata=tempdata[0:-1] rawdata=rawdata+tempdata #add current node return rawdata def showSpecs(self): """show the complete specification text in YAML format""" self.SpecsBox.setText(self.deactivate_nodes()) def note(self, notestring): """add notestring to current notes""" self.Notes.setText(self.Notes.toPlainText()+notestring+"\n") def import_comment(self, filename): """this function loads the given filename and stores all lines starting with '#' in the comment variable. These lines have to be at the beginning of the file and must not start with a node specification (in YAML format).""" open_file=open(filename) lines=open_file.readlines() open_file.close() self.comment='' startyaml=False for line in lines: if line.startswith('#'): if line.startswith('#- node: ') or line.startswith('#- {node: '): break else: self.comment=self.comment + line else: break ############## all connections in GUI def resetNoteConnections(self): """Each time something happens in the GUI, the notes are deleted. The current function establishes these connections""" QtCore.QObject.connect(self.SelectNodeBox, QtCore.SIGNAL("currentIndexChanged(int)"), self.resetNotes) QtCore.QObject.connect(self.PriorityLineEdit, QtCore.SIGNAL("textEdited(const QString&)"), self.resetNotes) QtCore.QObject.connect(self.NodeActive, QtCore.SIGNAL("clicked()"), self.resetNotes) QtCore.QObject.connect(self.InsertNodeButton, QtCore.SIGNAL("clicked()"), self.resetNotes) QtCore.QObject.connect(self.DeleteNodeButton, QtCore.SIGNAL("clicked()"), self.resetNotes) QtCore.QObject.connect(self.ParamBox, QtCore.SIGNAL("currentIndexChanged(int)"), self.resetNotes) QtCore.QObject.connect(self.ParamValue, QtCore.SIGNAL("textEdited(const QString&)"), self.resetNotes) QtCore.QObject.connect(self.SetParamButton, QtCore.SIGNAL("clicked()"), self.resetNotes) QtCore.QObject.connect(self.DefaultParamButton, QtCore.SIGNAL("clicked()"), self.resetNotes) QtCore.QObject.connect(self.BrowseButton, QtCore.SIGNAL("clicked()"), self.resetNotes) QtCore.QObject.connect(self.FileEdit, QtCore.SIGNAL("textEdited(const QString&)"), self.resetNotes) QtCore.QObject.connect(self.LoadButton, QtCore.SIGNAL("clicked()"), self.resetNotes) def makeConnections(self): """All other connections that are necessary to make the GUI work.""" QtCore.QObject.connect(self.TypeNodeBox, QtCore.SIGNAL("currentIndexChanged(int)"), self.formatNodeBox) QtCore.QObject.connect(self.SelectNodeBox, QtCore.SIGNAL("currentIndexChanged(int)"), self.formatParamBox) QtCore.QObject.connect(self.SelectNodeBox, QtCore.SIGNAL("currentIndexChanged(int)"), self.getPriority) QtCore.QObject.connect(self.SelectNodeBox, QtCore.SIGNAL("currentIndexChanged(int)"), self.getState) QtCore.QObject.connect(self.SelectNodeBox, QtCore.SIGNAL("currentIndexChanged(int)"), self.get_param_value) QtCore.QObject.connect(self.SelectNodeBox, QtCore.SIGNAL("currentIndexChanged(int)"), self.update_help) 
QtCore.QObject.connect(self.NodeActive, QtCore.SIGNAL("clicked()"), self.setState) QtCore.QObject.connect(self.NodeActive, QtCore.SIGNAL("clicked()"), self.showSpecs) QtCore.QObject.connect(self.PriorityLineEdit, QtCore.SIGNAL("textEdited(const QString&)"), self.getState) QtCore.QObject.connect(self.PriorityLineEdit, QtCore.SIGNAL("textEdited(const QString&)"), self.get_param_value) QtCore.QObject.connect(self.HelpButton, QtCore.SIGNAL("clicked()"), self.show_HelpUi) QtCore.QObject.connect(self.InsertNodeButton, QtCore.SIGNAL("clicked()"), self.insertNode) QtCore.QObject.connect(self.DeleteNodeButton, QtCore.SIGNAL("clicked()"), self.delNode) QtCore.QObject.connect(self.ParamBox, QtCore.SIGNAL("currentIndexChanged(int)"), self.note_user_defined) QtCore.QObject.connect(self.ParamBox, QtCore.SIGNAL("currentIndexChanged(int)"), self.get_param_value) QtCore.QObject.connect(self.SetParamButton, QtCore.SIGNAL("clicked()"), self.setParam) QtCore.QObject.connect(self.DefaultParamButton, QtCore.SIGNAL("clicked()"), self.defaultParam) QtCore.QObject.connect(self.BrowseButton, QtCore.SIGNAL("clicked()"), self.selectFile) QtCore.QObject.connect(self.FileEdit, QtCore.SIGNAL("editingFinished()"), self.updateFileName) QtCore.QObject.connect(self.FileEdit, QtCore.SIGNAL("textEdited (const QString&)"), self.updateFileName) QtCore.QObject.connect(self.EditCommentButton, QtCore.SIGNAL("clicked()"), self.show_CommentUi) QtCore.QObject.connect(self.LoadButton, QtCore.SIGNAL("clicked()"), self.loadYAMLFile) QtCore.QObject.connect(self.SaveButton, QtCore.SIGNAL("clicked()"), self.saveYAMLFile) QtCore.QObject.connect(self.CloseButton, QtCore.SIGNAL("clicked()"), self.Comment_Dial, QtCore.SLOT("close()")) QtCore.QObject.connect(self.CloseButton, QtCore.SIGNAL("clicked()"), self.Help_Dial, QtCore.SLOT("close()")) ############## functions mainly executed by user using GUI def selectFile(self): """Opens the SelectFile Dialog and inserts filename in FileEdit field. 
If no file is selected, the filename is set to untitled.yaml""" self.filename = \ QtGui.QFileDialog.getSaveFileName(None, 'Select File', self.flow_directory, 'YAML files (*.yaml);;All files (*)') if self.filename.isEmpty(): self.filename = QtCore.QString("untitled.yaml") self.FileEdit.setText(self.filename) def loadYAMLFile(self): """load YAML file specified by user: a dialog opens asking the user to specify a file then the file is decomposed into comment header, active and inactive nodes the results are written into the comment and the specs variable """ open_fname = QtGui.QFileDialog.getOpenFileName(None, 'Open File', '.', 'YAML files (*.yaml);;All files (*)') if not open_fname.isEmpty(): self.import_comment(open_fname) yamlfile=open(open_fname) raw_yaml=yamlfile.read() yamlfile.close() if self.comment: raw_yaml=raw_yaml[len(self.comment)-1:] #cut comment inactive_nodes=self.find_inactive_nodes(raw_yaml) #raw_yaml=self.del_comment_between(raw_yaml) specs=yaml.load(raw_yaml.replace('#', '')) for nodenr in range(len(specs)): if nodenr in inactive_nodes: specs[nodenr]['node_is_active']=False else: specs[nodenr]['node_is_active']=True if self.validateNodesandParams(specs): self.filename=open_fname self.FileEdit.setText(self.filename) self.specs=specs self.showSpecs() def saveYAMLFile(self): """saves specs to specified file: if there is a comment, it is written in the header all inactive nodes are marked correspondingly""" if self.specs: savefile=open(self.filename, 'w') savefile.write(self.comment) savefile.write('\n') savefile.write(self.deactivate_nodes()) savefile.close() def updateFileName(self): """new filename is stored internally""" self.filename = self.FileEdit.text() def setPriority(self, priority): """the given priority is displayed in the corresponding line edit""" self.PriorityLineEdit.setText(str(priority)) def getPriority(self): """determine the priority of the selected node. if the node is specified multiple times, the priority of the first node found is taken respective notes are displayed""" currentnode=str(self.SelectNodeBox.currentText()) pos=1 nodefound=False for entry in self.specs: if currentnode in entry['node']: nodefound=True break pos+=1 if nodefound: self.note("If you use the selected node multiple times, edit the desired node by entering the appropriate priority.") self.PriorityLineEdit.setText(str(pos)) else: self.note("Selected Node is new. 
Select desired priority and press >>Insert Node<<.") self.PriorityLineEdit.setText("") def getState(self): """determines if node is specified as active and sets check accordingly in GUI""" if self.specs: priority=self.PriorityLineEdit.text() if not priority.isEmpty() and int(priority)>0 and int(priority)-1<len(self.specs): self.NodeActive.setChecked(self.specs[int(priority)-1]['node_is_active']) def setState(self): """set state in specs (internally)""" checked=False if self.specs: priority=self.PriorityLineEdit.text() if not priority.isEmpty() and int(priority)>0 and int(priority)-1<len(self.specs): if self.NodeActive.checkState(): checked=True self.specs[int(priority)-1]['node_is_active']=checked else: raise(ValueError,'Warning: node_chain_GUI::setState: Function called without valid priority!') #should never happen def insertNode(self): """inserts a node in specifications according to where the user specified it function is executed when user presses <insert> button when priority is not specified (or wrong), the node is appended at the end""" currentnode=str(self.SelectNodeBox.currentText()) if self.specs: priority=self.PriorityLineEdit.text() if not priority.isEmpty() and int(priority)>0: self.specs.insert(int(priority)-1, dict(node=currentnode)) else: self.specs.append(dict(node=currentnode)) self.setPriority(len(self.specs)) self.note("No or wrong priority given! Node appended at the end.") else: self.specs.append(dict(node=currentnode)) self.setPriority(len(self.specs)) self.note_user_defined() self.setState() self.showSpecs() def delNode(self): """delete node from current specifications. delete is ignored if priority is not specified correct""" currentnode=str(self.SelectNodeBox.currentText()) if self.specs: if self.PriorityLineEdit.text(): current_priority=int(self.PriorityLineEdit.text()) if currentnode in self.specs[current_priority-1]['node']: del self.specs[current_priority-1] else: self.note("No node at given priority! Delete ignored.") else: self.note("No priority given! Delete ignored.") else: self.note("No specifications given! 
Delete ignored.") self.note_user_defined() self.getPriority() self.showSpecs() def setParam(self): """ insert parameter into current specifications this is only happening, if user specifies node and priority correctly if parameter is existing, the value is only changed, if not, a new entry is established in specs the user defined parameter case is also considered, given it is entered in the expected way: 'param:value' """ currentnode=str(self.SelectNodeBox.currentText()) if not self.PriorityLineEdit.text().isEmpty(): #priority is specified current_pos=int(self.PriorityLineEdit.text())-1 #position=priority-1 if currentnode in self.specs[current_pos]['node']: #node exists if not self.ParamValue.text().isEmpty(): #parameter value is specified if str(self.ParamBox.currentText()) == 'user defined': selected_param, current_value=self.eval_user_defined() else: selected_param=str(self.ParamBox.currentText()) current_value=str(self.ParamValue.text()) #todo: check this if isinstance(current_value, basestring) and current_value.startswith("eval("): current_value = eval(current_value[5:-1]) if selected_param: #not the case if user specified 'user defined' parameter wrong if not 'parameters' in self.specs[current_pos]: #node has no parameters so far templist=[] templist.append(selected_param) self.specs[current_pos].update(dict(parameters=(dict.fromkeys(templist, current_value)))) else: current_params=self.specs[current_pos]['parameters'] if selected_param in current_params: #parameter exists current_params[selected_param]=current_value else: #insert new parameter templist=[] templist.append(selected_param) current_params.update(dict.fromkeys(templist, current_value)) else: self.note("No parameter value entered! Set parameter ignored.") else: self.note("No node at given priority! Please change priority appropriately.") else: self.note("No priority given! Please change priority appropriately.") self.showSpecs() def defaultParam(self): """the default value is not set here. instead, the parameter is deleted from the specifications, so that the default values are used. .. note:: this shortcoming should be improved in future versions """ currentnode=str(self.SelectNodeBox.currentText()) if not self.PriorityLineEdit.text().isEmpty(): #priority is specified current_pos=int(self.PriorityLineEdit.text())-1 #position=priority-1 if currentnode in self.specs[current_pos]['node']: #node exists if 'parameters' in self.specs[current_pos]: if str(self.ParamBox.currentText()) == 'user defined': selected_param=str(self.ParamValue.text()) else: selected_param=str(self.ParamBox.currentText()) current_params=self.specs[current_pos]['parameters'] if selected_param in current_params: del current_params[selected_param] else: self.note("No parameter <%s> found." % selected_param) if len(current_params)==0: del self.specs[current_pos]['parameters'] else: self.note("No node at given priority! Please change priority appropriately.") else: self.note("No priority given! Please change priority appropriately.") self.showSpecs() def note_user_defined(self): """display a special note only for user defined parameters""" if str(self.ParamBox.currentText()) == 'user defined': self.note("You have selected to define an additional parameter. To set parameter please enter name and value, separated by \':\' (e.g. myparam:5). 
To set default value enter only parameter name.") def resetNotes(self): """reset text in Notes""" self.Notes.setText("Notes: ") class NodeChainConfigurationWidget(QtGui.QWidget): """class which sets up GUI""" def __init__(self, flow_directory='.', parent=None): super(NodeChainConfigurationWidget, self).__init__() self.parent = parent self.confDialog = ConfDialog(flow_directory) self.confDialog.setupUi(self) self.connect(self.confDialog.CloseButton, QtCore.SIGNAL("clicked()"), self.close) def close(self): """ Close all gui components and gui """ self.parent.close() super(NodeChainConfigurationWidget, self).close() if __name__ == "__main__": """main: start GUI and close program, when all windows are closed""" app = QtGui.QApplication(sys.argv) configD = NodeChainConfigurationWidget() configD.show() configD.confDialog.DeleteNodeButton.setDefault(False) #without this, Button is automatically set to Default (for whatever reason) configD.confDialog.HelpButton.setDefault(False) #without this, Button is automatically set to Default (for whatever reason) app.connect(app, QtCore.SIGNAL("lastWindowClosed()"), app, QtCore.SLOT("quit()")) sys.exit(app.exec_())
gpl-3.0
-804,507,696,264,230,000
49.283368
208
0.639599
false
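deactivate_nodes() in the record above serialises each node with yaml.dump, re-indents it into list form, and prefixes every line of an inactive node with '#', so deactivated nodes survive in the saved flow as comments. The fragment below sketches the same round trip with plain PyYAML on an invented two-node chain (node names and parameters are made up for illustration) and is independent of pySPACE itself.

import yaml

# Invented chain; the second node is marked inactive and will be commented out.
chain = [
    {'node': 'FFT_Band_Pass_Filter', 'parameters': {'pass_band': [0.4, 4.0]},
     'node_is_active': True},
    {'node': 'Subsampling', 'parameters': {'target_frequency': 25.0},
     'node_is_active': False},
]

out = []
for spec in chain:
    spec = dict(spec)                             # keep the original untouched
    active = spec.pop('node_is_active')
    block = '- ' + yaml.dump(spec, default_flow_style=False).replace('\n', '\n  ')
    block = block.rstrip() + '\n'
    if not active:                                # '#' in front of every line
        block = ''.join('#' + line + '\n' for line in block.splitlines())
    out.append(block)

print(''.join(out))

Loading such a file back only requires stripping the leading '#' characters again, which is what the record's find_inactive_nodes() relies on.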
endlessm/chromium-browser
tools/json_schema_compiler/idl_schema.py
3
21823
#! /usr/bin/env python # Copyright (c) 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. from __future__ import print_function import itertools import json import os.path import pprint import re import sys if sys.version_info.major == 2: from itertools import izip_longest as zip_longest else: from itertools import zip_longest from json_parse import OrderedDict # This file is a peer to json_schema.py. Each of these files understands a # certain format describing APIs (either JSON or IDL), reads files written # in that format into memory, and emits them as a Python array of objects # corresponding to those APIs, where the objects are formatted in a way that # the JSON schema compiler understands. compiler.py drives both idl_schema.py # and json_schema.py. # idl_parser expects to be able to import certain files in its directory, # so let's set things up the way it wants. _idl_generators_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir, os.pardir, 'ppapi', 'generators') if _idl_generators_path in sys.path: import idl_parser else: sys.path.insert(0, _idl_generators_path) try: import idl_parser finally: sys.path.pop(0) def ProcessComment(comment): ''' Convert a comment into a parent comment and a list of parameter comments. Function comments are of the form: Function documentation. May contain HTML and multiple lines. |arg1_name|: Description of arg1. Use <var>argument</var> to refer to other arguments. |arg2_name|: Description of arg2... Newlines are removed, and leading and trailing whitespace is stripped. Args: comment: The string from a Comment node. Returns: A tuple that looks like: ( "The processed comment, minus all |parameter| mentions and jsexterns.", "Any block wrapped in <jsexterns></jsexterns>.", { 'parameter_name_1': "The comment that followed |parameter_name_1|:", ... } ) ''' jsexterns = None match = re.search('<jsexterns>(.*)</jsexterns>', comment, re.DOTALL) if match: jsexterns = match.group(1).strip() comment = comment[:match.start()] + comment[match.end():] def add_paragraphs(content): paragraphs = content.split('\n\n') if len(paragraphs) < 2: return content return '<p>' + '</p><p>'.join(p.strip() for p in paragraphs) + '</p>' # Find all the parameter comments of the form '|name|: comment'. parameter_starts = list(re.finditer(r' *\|([^|]*)\| *: *', comment)) # Get the parent comment (everything before the first parameter comment. first_parameter_location = (parameter_starts[0].start() if parameter_starts else len(comment)) parent_comment = (add_paragraphs(comment[:first_parameter_location].strip()) .replace('\n', '')) params = OrderedDict() for (cur_param, next_param) in zip_longest(parameter_starts, parameter_starts[1:]): param_name = cur_param.group(1) # A parameter's comment goes from the end of its introduction to the # beginning of the next parameter's introduction. 
param_comment_start = cur_param.end() param_comment_end = next_param.start() if next_param else len(comment) params[param_name] = ( add_paragraphs(comment[param_comment_start:param_comment_end].strip()) .replace('\n', '')) return (parent_comment, jsexterns, params) class Callspec(object): ''' Given a Callspec node representing an IDL function declaration, converts into a tuple: (name, list of function parameters, return type) ''' def __init__(self, callspec_node, comment): self.node = callspec_node self.comment = comment def process(self, callbacks): parameters = [] return_type = None if self.node.GetProperty('TYPEREF') not in ('void', None): return_type = Typeref(self.node.GetProperty('TYPEREF'), self.node.parent, {'name': self.node.GetName()}).process(callbacks) # The IDL parser doesn't allow specifying return types as optional. # Instead we infer any object return values to be optional. # TODO(asargent): fix the IDL parser to support optional return types. if return_type.get('type') == 'object' or '$ref' in return_type: return_type['optional'] = True for node in self.node.GetChildren(): parameter = Param(node).process(callbacks) if parameter['name'] in self.comment: parameter['description'] = self.comment[parameter['name']] parameters.append(parameter) return (self.node.GetName(), parameters, return_type) class Param(object): ''' Given a Param node representing a function parameter, converts into a Python dictionary that the JSON schema compiler expects to see. ''' def __init__(self, param_node): self.node = param_node def process(self, callbacks): return Typeref(self.node.GetProperty('TYPEREF'), self.node, {'name': self.node.GetName()}).process(callbacks) class Dictionary(object): ''' Given an IDL Dictionary node, converts into a Python dictionary that the JSON schema compiler expects to see. ''' def __init__(self, dictionary_node): self.node = dictionary_node def process(self, callbacks): properties = OrderedDict() for node in self.node.GetChildren(): if node.cls == 'Member': k, v = Member(node).process(callbacks) properties[k] = v result = {'id': self.node.GetName(), 'properties': properties, 'type': 'object'} if self.node.GetProperty('nodefine'): result['nodefine'] = True if self.node.GetProperty('nodoc'): result['nodoc'] = True elif self.node.GetProperty('inline_doc'): result['inline_doc'] = True elif self.node.GetProperty('noinline_doc'): result['noinline_doc'] = True return result class Member(object): ''' Given an IDL dictionary or interface member, converts into a name/value pair where the value is a Python dictionary that the JSON schema compiler expects to see. 
''' def __init__(self, member_node): self.node = member_node def process(self, callbacks, functions_are_properties=False): properties = OrderedDict() name = self.node.GetName() if self.node.GetProperty('deprecated'): properties['deprecated'] = self.node.GetProperty('deprecated') for property_name in ['allowAmbiguousOptionalArguments', 'nodoc', 'nocompile', 'nodart', 'nodefine']: if self.node.GetProperty(property_name): properties[property_name] = True if self.node.GetProperty('OPTIONAL'): properties['optional'] = True for option_name, sanitizer in [ ('maxListeners', int), ('supportsFilters', lambda s: s == 'true'), ('supportsListeners', lambda s: s == 'true'), ('supportsRules', lambda s: s == 'true')]: if self.node.GetProperty(option_name): if 'options' not in properties: properties['options'] = {} properties['options'][option_name] = sanitizer(self.node.GetProperty( option_name)) type_override = None parameter_comments = OrderedDict() for node in self.node.GetChildren(): if node.cls == 'Comment': (parent_comment, jsexterns, parameter_comments) = ProcessComment( node.GetName()) properties['description'] = parent_comment properties['jsexterns'] = jsexterns elif node.cls == 'Callspec': name, parameters, return_type = (Callspec(node, parameter_comments) .process(callbacks)) if functions_are_properties: # If functions are treated as properties (which will happen if the # interface is named Properties) then this isn't a function, it's a # property which is encoded as a function with no arguments. The # property type is the return type. This is an egregious hack in lieu # of the IDL parser supporting 'const'. assert parameters == [], ( 'Property "%s" must be no-argument functions ' 'with a non-void return type' % name) assert return_type is not None, ( 'Property "%s" must be no-argument functions ' 'with a non-void return type' % name) assert 'type' in return_type, ( 'Property return type "%s" from "%s" must specify a ' 'fundamental IDL type.' % (pprint.pformat(return_type), name)) type_override = return_type['type'] else: type_override = 'function' properties['parameters'] = parameters if return_type is not None: properties['returns'] = return_type properties['name'] = name if type_override is not None: properties['type'] = type_override else: properties = Typeref(self.node.GetProperty('TYPEREF'), self.node, properties).process(callbacks) value = self.node.GetProperty('value') if value is not None: # IDL always returns values as strings, so cast to their real type. properties['value'] = self.cast_from_json_type(properties['type'], value) enum_values = self.node.GetProperty('legalValues') if enum_values: # IDL always returns enum values as strings, so cast to their real type. properties['enum'] = [self.cast_from_json_type(properties['type'], enum) for enum in enum_values] return name, properties def cast_from_json_type(self, json_type, string_value): '''Casts from string |string_value| to a real Python type based on a JSON Schema type |json_type|. For example, a string value of '42' and a JSON Schema type 'integer' will cast to int('42') ==> 42. ''' if json_type == 'integer': return int(string_value) if json_type == 'number': return float(string_value) # Add more as necessary. assert json_type == 'string', ( 'No rule exists to cast JSON Schema type "%s" to its equivalent ' 'Python type for value "%s". You must add a new rule here.' 
% (json_type, string_value)) return string_value class Typeref(object): ''' Given a TYPEREF property representing the type of dictionary member or function parameter, converts into a Python dictionary that the JSON schema compiler expects to see. ''' def __init__(self, typeref, parent, additional_properties): self.typeref = typeref self.parent = parent self.additional_properties = additional_properties def process(self, callbacks): properties = self.additional_properties result = properties if self.parent.GetPropertyLocal('OPTIONAL'): properties['optional'] = True # The IDL parser denotes array types by adding a child 'Array' node onto # the Param node in the Callspec. for sibling in self.parent.GetChildren(): if sibling.cls == 'Array' and sibling.GetName() == self.parent.GetName(): properties['type'] = 'array' properties['items'] = OrderedDict() properties = properties['items'] break if self.typeref == 'DOMString': properties['type'] = 'string' elif self.typeref == 'boolean': properties['type'] = 'boolean' elif self.typeref == 'double': properties['type'] = 'number' elif self.typeref == 'long': properties['type'] = 'integer' elif self.typeref == 'any': properties['type'] = 'any' elif self.typeref == 'object': properties['type'] = 'object' if 'additionalProperties' not in properties: properties['additionalProperties'] = OrderedDict() properties['additionalProperties']['type'] = 'any' instance_of = self.parent.GetProperty('instanceOf') if instance_of: properties['isInstanceOf'] = instance_of elif self.typeref == 'ArrayBuffer': properties['type'] = 'binary' properties['isInstanceOf'] = 'ArrayBuffer' elif self.typeref == 'ArrayBufferView': properties['type'] = 'binary' # We force the APIs to specify instanceOf since ArrayBufferView isn't an # instantiable type, therefore we don't specify isInstanceOf here. elif self.typeref == 'FileEntry': properties['type'] = 'object' properties['isInstanceOf'] = 'FileEntry' if 'additionalProperties' not in properties: properties['additionalProperties'] = OrderedDict() properties['additionalProperties']['type'] = 'any' elif self.parent.GetPropertyLocal('Union'): properties['choices'] = [Typeref(node.GetProperty('TYPEREF'), node, OrderedDict()).process(callbacks) for node in self.parent.GetChildren() if node.cls == 'Option'] elif self.typeref is None: properties['type'] = 'function' else: if self.typeref in callbacks: # Do not override name and description if they are already specified. name = properties.get('name', None) description = properties.get('description', None) properties.update(callbacks[self.typeref]) if description is not None: properties['description'] = description if name is not None: properties['name'] = name else: properties['$ref'] = self.typeref return result class Enum(object): ''' Given an IDL Enum node, converts into a Python dictionary that the JSON schema compiler expects to see. 
''' def __init__(self, enum_node): self.node = enum_node self.description = '' def process(self): enum = [] for node in self.node.GetChildren(): if node.cls == 'EnumItem': enum_value = {'name': node.GetName()} if node.GetProperty('nodoc'): enum_value['nodoc'] = True for child in node.GetChildren(): if child.cls == 'Comment': enum_value['description'] = ProcessComment(child.GetName())[0] else: raise ValueError('Did not process %s %s' % (child.cls, child)) enum.append(enum_value) elif node.cls == 'Comment': self.description = ProcessComment(node.GetName())[0] else: sys.exit('Did not process %s %s' % (node.cls, node)) result = {'id' : self.node.GetName(), 'description': self.description, 'type': 'string', 'enum': enum} for property_name in ('cpp_enum_prefix_override', 'inline_doc', 'noinline_doc', 'nodefine', 'nodoc',): if self.node.GetProperty(property_name): result[property_name] = self.node.GetProperty(property_name) if self.node.GetProperty('deprecated'): result['deprecated'] = self.node.GetProperty('deprecated') return result class Namespace(object): ''' Given an IDLNode representing an IDL namespace, converts into a Python dictionary that the JSON schema compiler expects to see. ''' def __init__(self, namespace_node, description, nodoc=False, internal=False, platforms=None, compiler_options=None, deprecated=None, documentation_options=None): self.namespace = namespace_node self.nodoc = nodoc self.internal = internal self.platforms = platforms self.compiler_options = compiler_options self.events = [] self.functions = [] self.properties = OrderedDict() self.types = [] self.callbacks = OrderedDict() self.description = description self.deprecated = deprecated self.documentation_options = documentation_options def process(self): for node in self.namespace.GetChildren(): if node.cls == 'Dictionary': self.types.append(Dictionary(node).process(self.callbacks)) elif node.cls == 'Callback': k, v = Member(node).process(self.callbacks) self.callbacks[k] = v elif node.cls == 'Interface' and node.GetName() == 'Functions': self.functions = self.process_interface(node) elif node.cls == 'Interface' and node.GetName() == 'Events': self.events = self.process_interface(node) elif node.cls == 'Interface' and node.GetName() == 'Properties': properties_as_list = self.process_interface( node, functions_are_properties=True) for prop in properties_as_list: # Properties are given as key-value pairs, but IDL will parse # it as a list. Convert back to key-value pairs. prop_name = prop.pop('name') assert not prop_name in self.properties, ( 'Property "%s" cannot be specified more than once.' 
% prop_name) self.properties[prop_name] = prop elif node.cls == 'Enum': self.types.append(Enum(node).process()) else: sys.exit('Did not process %s %s' % (node.cls, node)) compiler_options = self.compiler_options or {} documentation_options = self.documentation_options or {} return {'namespace': self.namespace.GetName(), 'description': self.description, 'nodoc': self.nodoc, 'types': self.types, 'functions': self.functions, 'properties': self.properties, 'internal': self.internal, 'events': self.events, 'platforms': self.platforms, 'compiler_options': compiler_options, 'deprecated': self.deprecated, 'documentation_options': documentation_options} def process_interface(self, node, functions_are_properties=False): members = [] for member in node.GetChildren(): if member.cls == 'Member': _, properties = Member(member).process( self.callbacks, functions_are_properties=functions_are_properties) members.append(properties) return members class IDLSchema(object): ''' Given a list of IDLNodes and IDLAttributes, converts into a Python list of api_defs that the JSON schema compiler expects to see. ''' def __init__(self, idl): self.idl = idl def process(self): namespaces = [] nodoc = False internal = False description = None platforms = None compiler_options = {} deprecated = None documentation_options = {} for node in self.idl: if node.cls == 'Namespace': if not description: # TODO(kalman): Go back to throwing an error here. print('%s must have a namespace-level comment. This will ' 'appear on the API summary page.' % node.GetName()) description = '' namespace = Namespace(node, description, nodoc, internal, platforms=platforms, compiler_options=compiler_options or None, deprecated=deprecated, documentation_options=documentation_options) namespaces.append(namespace.process()) nodoc = False internal = False platforms = None compiler_options = None elif node.cls == 'Copyright': continue elif node.cls == 'Comment': description = node.GetName() elif node.cls == 'ExtAttribute': if node.name == 'nodoc': nodoc = bool(node.value) elif node.name == 'internal': internal = bool(node.value) elif node.name == 'platforms': platforms = list(node.value) elif node.name == 'implemented_in': compiler_options['implemented_in'] = node.value elif node.name == 'camel_case_enum_to_string': compiler_options['camel_case_enum_to_string'] = node.value elif node.name == 'generate_error_messages': compiler_options['generate_error_messages'] = True elif node.name == 'deprecated': deprecated = str(node.value) elif node.name == 'documentation_title': documentation_options['title'] = node.value elif node.name == 'documentation_namespace': documentation_options['namespace'] = node.value elif node.name == 'documented_in': documentation_options['documented_in'] = node.value else: continue else: sys.exit('Did not process %s %s' % (node.cls, node)) return namespaces def Load(filename): ''' Given the filename of an IDL file, parses it and returns an equivalent Python dictionary in a format that the JSON schema compiler expects to see. ''' f = open(filename, 'r') contents = f.read() f.close() return Process(contents, filename) def Process(contents, filename): ''' Processes the contents of a file and returns an equivalent Python dictionary in a format that the JSON schema compiler expects to see. (Separate from Load primarily for testing purposes.) 
''' idl = idl_parser.IDLParser().ParseData(contents, filename) idl_schema = IDLSchema(idl) return idl_schema.process() def Main(): ''' Dump a json serialization of parse result for the IDL files whose names were passed in on the command line. ''' if len(sys.argv) > 1: for filename in sys.argv[1:]: schema = Load(filename) print(json.dumps(schema, indent=2)) else: contents = sys.stdin.read() idl = idl_parser.IDLParser().ParseData(contents, '<stdin>') schema = IDLSchema(idl).process() print(json.dumps(schema, indent=2)) if __name__ == '__main__': Main()
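
# Illustrative usage sketch (not part of the original file). Assuming this
# module is importable as `idl_schema` and that 'extension_api.idl' is a
# placeholder path to an extension IDL file, the parser can be driven
# programmatically instead of via Main():
#
#     import json
#     import idl_schema
#
#     namespaces = idl_schema.Load('extension_api.idl')
#     print(json.dumps(namespaces, indent=2))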
bsd-3-clause
5,246,746,538,860,953,000
36.240614
80
0.625945
false
hungtt57/matchmaker
lib/python2.7/site-packages/cryptography/hazmat/primitives/asymmetric/utils.py
4
1991
# This file is dual licensed under the terms of the Apache License, Version # 2.0, and the BSD License. See the LICENSE file in the root of this repository # for complete details. from __future__ import absolute_import, division, print_function import warnings from pyasn1.codec.der import decoder, encoder from pyasn1.error import PyAsn1Error from pyasn1.type import namedtype, univ import six from cryptography import utils class _DSSSigValue(univ.Sequence): componentType = namedtype.NamedTypes( namedtype.NamedType('r', univ.Integer()), namedtype.NamedType('s', univ.Integer()) ) def decode_rfc6979_signature(signature): warnings.warn( "decode_rfc6979_signature is deprecated and will " "be removed in a future version, use decode_dss_signature instead.", utils.DeprecatedIn10, stacklevel=2 ) return decode_dss_signature(signature) def decode_dss_signature(signature): try: data, remaining = decoder.decode(signature, asn1Spec=_DSSSigValue()) except PyAsn1Error: raise ValueError("Invalid signature data. Unable to decode ASN.1") if remaining: raise ValueError( "The signature contains bytes after the end of the ASN.1 sequence." ) r = int(data.getComponentByName('r')) s = int(data.getComponentByName('s')) return (r, s) def encode_rfc6979_signature(r, s): warnings.warn( "encode_rfc6979_signature is deprecated and will " "be removed in a future version, use encode_dss_signature instead.", utils.DeprecatedIn10, stacklevel=2 ) return encode_dss_signature(r, s) def encode_dss_signature(r, s): if ( not isinstance(r, six.integer_types) or not isinstance(s, six.integer_types) ): raise ValueError("Both r and s must be integers") sig = _DSSSigValue() sig.setComponentByName('r', r) sig.setComponentByName('s', s) return encoder.encode(sig)
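
# Illustrative round-trip sketch (not part of the original module). The r and
# s values below are arbitrary small integers chosen only for demonstration;
# in practice they come from a DSA/ECDSA signing operation:
#
#     sig = encode_dss_signature(12345, 67890)   # DER-encoded ASN.1 SEQUENCE
#     r, s = decode_dss_signature(sig)
#     assert (r, s) == (12345, 67890)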
mit
192,957,968,989,777,060
27.042254
79
0.681567
false
cwtaylor/viper
viper/modules/peepdf/colorama/ansitowin32.py
43
6182
import re import sys from .ansi import AnsiFore, AnsiBack, AnsiStyle, Style from .winterm import WinTerm, WinColor, WinStyle from .win32 import windll if windll is not None: winterm = WinTerm() def is_a_tty(stream): return hasattr(stream, 'isatty') and stream.isatty() class StreamWrapper(object): ''' Wraps a stream (such as stdout), acting as a transparent proxy for all attribute access apart from method 'write()', which is delegated to our Converter instance. ''' def __init__(self, wrapped, converter): # double-underscore everything to prevent clashes with names of # attributes on the wrapped stream object. self.__wrapped = wrapped self.__convertor = converter def __getattr__(self, name): return getattr(self.__wrapped, name) def write(self, text): self.__convertor.write(text) class AnsiToWin32(object): ''' Implements a 'write()' method which, on Windows, will strip ANSI character sequences from the text, and if outputting to a tty, will convert them into win32 function calls. ''' ANSI_RE = re.compile('\033\[((?:\d|;)*)([a-zA-Z])') def __init__(self, wrapped, convert=None, strip=None, autoreset=False): # The wrapped stream (normally sys.stdout or sys.stderr) self.wrapped = wrapped # should we reset colors to defaults after every .write() self.autoreset = autoreset # create the proxy wrapping our output stream self.stream = StreamWrapper(wrapped, self) on_windows = sys.platform.startswith('win') # should we strip ANSI sequences from our output? if strip is None: strip = on_windows self.strip = strip # should we should convert ANSI sequences into win32 calls? if convert is None: convert = on_windows and is_a_tty(wrapped) self.convert = convert # dict of ansi codes to win32 functions and parameters self.win32_calls = self.get_win32_calls() # are we wrapping stderr? self.on_stderr = self.wrapped is sys.stderr def should_wrap(self): ''' True if this class is actually needed. If false, then the output stream will not be affected, nor will win32 calls be issued, so wrapping stdout is not actually required. 
This will generally be False on non-Windows platforms, unless optional functionality like autoreset has been requested using kwargs to init() ''' return self.convert or self.strip or self.autoreset def get_win32_calls(self): if self.convert and winterm: return { AnsiStyle.RESET_ALL: (winterm.reset_all, ), AnsiStyle.BRIGHT: (winterm.style, WinStyle.BRIGHT), AnsiStyle.DIM: (winterm.style, WinStyle.NORMAL), AnsiStyle.NORMAL: (winterm.style, WinStyle.NORMAL), AnsiFore.BLACK: (winterm.fore, WinColor.BLACK), AnsiFore.RED: (winterm.fore, WinColor.RED), AnsiFore.GREEN: (winterm.fore, WinColor.GREEN), AnsiFore.YELLOW: (winterm.fore, WinColor.YELLOW), AnsiFore.BLUE: (winterm.fore, WinColor.BLUE), AnsiFore.MAGENTA: (winterm.fore, WinColor.MAGENTA), AnsiFore.CYAN: (winterm.fore, WinColor.CYAN), AnsiFore.WHITE: (winterm.fore, WinColor.GREY), AnsiFore.RESET: (winterm.fore, ), AnsiBack.BLACK: (winterm.back, WinColor.BLACK), AnsiBack.RED: (winterm.back, WinColor.RED), AnsiBack.GREEN: (winterm.back, WinColor.GREEN), AnsiBack.YELLOW: (winterm.back, WinColor.YELLOW), AnsiBack.BLUE: (winterm.back, WinColor.BLUE), AnsiBack.MAGENTA: (winterm.back, WinColor.MAGENTA), AnsiBack.CYAN: (winterm.back, WinColor.CYAN), AnsiBack.WHITE: (winterm.back, WinColor.GREY), AnsiBack.RESET: (winterm.back, ), } def write(self, text): if self.strip or self.convert: self.write_and_convert(text) else: self.wrapped.write(text) self.wrapped.flush() if self.autoreset: self.reset_all() def reset_all(self): if self.convert: self.call_win32('m', (0,)) else: self.wrapped.write(Style.RESET_ALL) def write_and_convert(self, text): ''' Write the given text to our wrapped stream, stripping any ANSI sequences from the text, and optionally converting them into win32 calls. ''' cursor = 0 for match in self.ANSI_RE.finditer(text): start, end = match.span() self.write_plain_text(text, cursor, start) self.convert_ansi(*match.groups()) cursor = end self.write_plain_text(text, cursor, len(text)) def write_plain_text(self, text, start, end): if start < end: self.wrapped.write(text[start:end]) self.wrapped.flush() def convert_ansi(self, paramstring, command): if self.convert: params = self.extract_params(paramstring) self.call_win32(command, params) def extract_params(self, paramstring): def split(paramstring): for p in paramstring.split(';'): if p != '': yield int(p) return tuple(split(paramstring)) def call_win32(self, command, params): if params == []: params = [0] if command == 'm': for param in params: if param in self.win32_calls: func_args = self.win32_calls[param] func = func_args[0] args = func_args[1:] kwargs = dict(on_stderr=self.on_stderr) func(*args, **kwargs)
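
# Illustrative usage sketch (not part of the original module). colorama's
# init() normally performs this wrapping internally, but the class can be
# used directly; on non-Windows terminals the wrapper is effectively a
# pass-through unless stripping or autoreset is requested:
#
#     import sys
#     wrapper = AnsiToWin32(sys.stdout, autoreset=True)
#     stream = wrapper.stream if wrapper.should_wrap() else sys.stdout
#     stream.write('\033[31mred text\033[0m\n')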
bsd-3-clause
-1,934,969,445,790,508,500
33.125
79
0.571498
false
glorizen/nupic
tests/unit/nupic/research/temporal_memory_test.py
19
21695
#!/usr/bin/env python # ---------------------------------------------------------------------- # Numenta Platform for Intelligent Computing (NuPIC) # Copyright (C) 2014, Numenta, Inc. Unless you have an agreement # with Numenta, Inc., for a separate license for this software code, the # following terms and conditions apply: # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero Public License version 3 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # See the GNU Affero Public License for more details. # # You should have received a copy of the GNU Affero Public License # along with this program. If not, see http://www.gnu.org/licenses. # # http://numenta.org/licenses/ # ---------------------------------------------------------------------- """ TODO: Mock out all function calls. TODO: Make default test TM instance simpler, with 4 cells per column. """ import tempfile import unittest from nupic.data.generators.pattern_machine import PatternMachine from nupic.data.generators.sequence_machine import SequenceMachine from nupic.research.temporal_memory import TemporalMemory try: import capnp except ImportError: capnp = None if capnp: from nupic.proto import TemporalMemoryProto_capnp class TemporalMemoryTest(unittest.TestCase): def setUp(self): self.tm = TemporalMemory() def testInitInvalidParams(self): # Invalid columnDimensions kwargs = {"columnDimensions": [], "cellsPerColumn": 32} self.assertRaises(ValueError, TemporalMemory, **kwargs) # Invalid cellsPerColumn kwargs = {"columnDimensions": [2048], "cellsPerColumn": 0} self.assertRaises(ValueError, TemporalMemory, **kwargs) kwargs = {"columnDimensions": [2048], "cellsPerColumn": -10} self.assertRaises(ValueError, TemporalMemory, **kwargs) def testActivateCorrectlyPredictiveCells(self): tm = self.tm prevPredictiveCells = set([0, 237, 1026, 26337, 26339, 55536]) activeColumns = set([32, 47, 823]) prevMatchingCells = set() (activeCells, winnerCells, predictedColumns, predictedInactiveCells) = tm.activateCorrectlyPredictiveCells(prevPredictiveCells, prevMatchingCells, activeColumns) self.assertEqual(activeCells, set([1026, 26337, 26339])) self.assertEqual(winnerCells, set([1026, 26337, 26339])) self.assertEqual(predictedColumns, set([32, 823])) self.assertEqual(predictedInactiveCells, set()) def testActivateCorrectlyPredictiveCellsEmpty(self): tm = self.tm # No previous predictive cells, no active columns prevPredictiveCells = set() activeColumns = set() prevMatchingCells = set() (activeCells, winnerCells, predictedColumns, predictedInactiveCells) = tm.activateCorrectlyPredictiveCells(prevPredictiveCells, prevMatchingCells, activeColumns) self.assertEqual(activeCells, set()) self.assertEqual(winnerCells, set()) self.assertEqual(predictedColumns, set()) self.assertEqual(predictedInactiveCells, set()) # No previous predictive cells, with active columns prevPredictiveCells = set() activeColumns = set([32, 47, 823]) prevMatchingCells = set() (activeCells, winnerCells, predictedColumns, predictedInactiveCells) = tm.activateCorrectlyPredictiveCells(prevPredictiveCells, prevMatchingCells, activeColumns) self.assertEqual(activeCells, set()) self.assertEqual(winnerCells, set()) self.assertEqual(predictedColumns, set()) self.assertEqual(predictedInactiveCells, set()) # No active columns, with previously predictive 
cells prevPredictiveCells = set([0, 237, 1026, 26337, 26339, 55536]) activeColumns = set() prevMatchingCells = set() (activeCells, winnerCells, predictedColumns, predictedInactiveCells) = tm.activateCorrectlyPredictiveCells(prevPredictiveCells, prevMatchingCells, activeColumns) self.assertEqual(activeCells, set()) self.assertEqual(winnerCells, set()) self.assertEqual(predictedColumns, set()) self.assertEqual(predictedInactiveCells, set()) def testActivateCorrectlyPredictiveCellsOrphan(self): tm = self.tm tm.predictedSegmentDecrement = 0.001 prevPredictiveCells = set([]) activeColumns = set([32, 47, 823]) prevMatchingCells = set([32, 47]) (activeCells, winnerCells, predictedColumns, predictedInactiveCells) = tm.activateCorrectlyPredictiveCells(prevPredictiveCells, prevMatchingCells, activeColumns) self.assertEqual(activeCells, set([])) self.assertEqual(winnerCells, set([])) self.assertEqual(predictedColumns, set([])) self.assertEqual(predictedInactiveCells, set([32,47])) def testBurstColumns(self): tm = TemporalMemory( cellsPerColumn=4, connectedPermanence=0.50, minThreshold=1, seed=42 ) connections = tm.connections connections.createSegment(0) connections.createSynapse(0, 23, 0.6) connections.createSynapse(0, 37, 0.4) connections.createSynapse(0, 477, 0.9) connections.createSegment(0) connections.createSynapse(1, 49, 0.9) connections.createSynapse(1, 3, 0.8) connections.createSegment(1) connections.createSynapse(2, 733, 0.7) connections.createSegment(108) connections.createSynapse(3, 486, 0.9) activeColumns = set([0, 1, 26]) predictedColumns = set([26]) prevActiveCells = set([23, 37, 49, 733]) prevWinnerCells = set([23, 37, 49, 733]) (activeCells, winnerCells, learningSegments) = tm.burstColumns(activeColumns, predictedColumns, prevActiveCells, prevWinnerCells, connections) self.assertEqual(activeCells, set([0, 1, 2, 3, 4, 5, 6, 7])) randomWinner = 4 self.assertEqual(winnerCells, set([0, randomWinner])) # 4 is randomly chosen cell self.assertEqual(learningSegments, set([0, 4])) # 4 is new segment created # Check that new segment was added to winner cell (6) in column 1 self.assertEqual(connections.segmentsForCell(randomWinner), set([4])) def testBurstColumnsEmpty(self): tm = self.tm activeColumns = set() predictedColumns = set() prevActiveCells = set() prevWinnerCells = set() connections = tm.connections (activeCells, winnerCells, learningSegments) = tm.burstColumns(activeColumns, predictedColumns, prevActiveCells, prevWinnerCells, connections) self.assertEqual(activeCells, set()) self.assertEqual(winnerCells, set()) self.assertEqual(learningSegments, set()) def testLearnOnSegments(self): tm = TemporalMemory(maxNewSynapseCount=2) connections = tm.connections connections.createSegment(0) connections.createSynapse(0, 23, 0.6) connections.createSynapse(0, 37, 0.4) connections.createSynapse(0, 477, 0.9) connections.createSegment(1) connections.createSynapse(1, 733, 0.7) connections.createSegment(8) connections.createSynapse(2, 486, 0.9) connections.createSegment(100) prevActiveSegments = set([0, 2]) learningSegments = set([1, 3]) prevActiveCells = set([23, 37, 733]) winnerCells = set([0]) prevWinnerCells = set([10, 11, 12, 13, 14]) predictedInactiveCells = set() prevMatchingSegments = set() tm.learnOnSegments(prevActiveSegments, learningSegments, prevActiveCells, winnerCells, prevWinnerCells, connections, predictedInactiveCells, prevMatchingSegments) # Check segment 0 synapseData = connections.dataForSynapse(0) self.assertAlmostEqual(synapseData.permanence, 0.7) synapseData = 
connections.dataForSynapse(1) self.assertAlmostEqual(synapseData.permanence, 0.5) synapseData = connections.dataForSynapse(2) self.assertAlmostEqual(synapseData.permanence, 0.8) # Check segment 1 synapseData = connections.dataForSynapse(3) self.assertAlmostEqual(synapseData.permanence, 0.8) self.assertEqual(len(connections.synapsesForSegment(1)), 2) # Check segment 2 synapseData = connections.dataForSynapse(4) self.assertAlmostEqual(synapseData.permanence, 0.9) self.assertEqual(len(connections.synapsesForSegment(2)), 1) # Check segment 3 self.assertEqual(len(connections.synapsesForSegment(3)), 2) def testComputePredictiveCells(self): tm = TemporalMemory(activationThreshold=2, minThreshold=2, predictedSegmentDecrement=0.004) connections = tm.connections connections.createSegment(0) connections.createSynapse(0, 23, 0.6) connections.createSynapse(0, 37, 0.5) connections.createSynapse(0, 477, 0.9) connections.createSegment(1) connections.createSynapse(1, 733, 0.7) connections.createSynapse(1, 733, 0.4) connections.createSegment(1) connections.createSynapse(2, 974, 0.9) connections.createSegment(8) connections.createSynapse(3, 486, 0.9) connections.createSegment(100) activeCells = set([23, 37, 733, 974]) (activeSegments, predictiveCells, matchingSegments, matchingCells) = tm.computePredictiveCells(activeCells, connections) self.assertEqual(activeSegments, set([0])) self.assertEqual(predictiveCells, set([0])) self.assertEqual(matchingSegments, set([0,1])) self.assertEqual(matchingCells, set([0,1])) def testBestMatchingCell(self): tm = TemporalMemory( connectedPermanence=0.50, minThreshold=1, seed=42 ) connections = tm.connections connections.createSegment(0) connections.createSynapse(0, 23, 0.6) connections.createSynapse(0, 37, 0.4) connections.createSynapse(0, 477, 0.9) connections.createSegment(0) connections.createSynapse(1, 49, 0.9) connections.createSynapse(1, 3, 0.8) connections.createSegment(1) connections.createSynapse(2, 733, 0.7) connections.createSegment(108) connections.createSynapse(3, 486, 0.9) activeCells = set([23, 37, 49, 733]) self.assertEqual(tm.bestMatchingCell(tm.cellsForColumn(0), activeCells, connections), (0, 0)) self.assertEqual(tm.bestMatchingCell(tm.cellsForColumn(3), # column containing cell 108 activeCells, connections), (103, None)) # Random cell from column self.assertEqual(tm.bestMatchingCell(tm.cellsForColumn(999), activeCells, connections), (31979, None)) # Random cell from column def testBestMatchingCellFewestSegments(self): tm = TemporalMemory( columnDimensions=[2], cellsPerColumn=2, connectedPermanence=0.50, minThreshold=1, seed=42 ) connections = tm.connections connections.createSegment(0) connections.createSynapse(0, 3, 0.3) activeSynapsesForSegment = set([]) for _ in range(100): # Never pick cell 0, always pick cell 1 (cell, _) = tm.bestMatchingCell(tm.cellsForColumn(0), activeSynapsesForSegment, connections) self.assertEqual(cell, 1) def testBestMatchingSegment(self): tm = TemporalMemory( connectedPermanence=0.50, minThreshold=1 ) connections = tm.connections connections.createSegment(0) connections.createSynapse(0, 23, 0.6) connections.createSynapse(0, 37, 0.4) connections.createSynapse(0, 477, 0.9) connections.createSegment(0) connections.createSynapse(1, 49, 0.9) connections.createSynapse(1, 3, 0.8) connections.createSegment(1) connections.createSynapse(2, 733, 0.7) connections.createSegment(8) connections.createSynapse(3, 486, 0.9) activeCells = set([23, 37, 49, 733]) self.assertEqual(tm.bestMatchingSegment(0, activeCells, connections), (0, 2)) 
self.assertEqual(tm.bestMatchingSegment(1, activeCells, connections), (2, 1)) self.assertEqual(tm.bestMatchingSegment(8, activeCells, connections), (None, None)) self.assertEqual(tm.bestMatchingSegment(100, activeCells, connections), (None, None)) def testLeastUsedCell(self): tm = TemporalMemory( columnDimensions=[2], cellsPerColumn=2, seed=42 ) connections = tm.connections connections.createSegment(0) connections.createSynapse(0, 3, 0.3) for _ in range(100): # Never pick cell 0, always pick cell 1 self.assertEqual(tm.leastUsedCell(tm.cellsForColumn(0), connections), 1) def testAdaptSegment(self): tm = self.tm connections = tm.connections connections.createSegment(0) connections.createSynapse(0, 23, 0.6) connections.createSynapse(0, 37, 0.4) connections.createSynapse(0, 477, 0.9) tm.adaptSegment(0, set([0, 1]), connections, tm.permanenceIncrement, tm.permanenceDecrement) synapseData = connections.dataForSynapse(0) self.assertAlmostEqual(synapseData.permanence, 0.7) synapseData = connections.dataForSynapse(1) self.assertAlmostEqual(synapseData.permanence, 0.5) synapseData = connections.dataForSynapse(2) self.assertAlmostEqual(synapseData.permanence, 0.8) def testAdaptSegmentToMax(self): tm = self.tm connections = tm.connections connections.createSegment(0) connections.createSynapse(0, 23, 0.9) tm.adaptSegment(0, set([0]), connections, tm.permanenceIncrement, tm.permanenceDecrement) synapseData = connections.dataForSynapse(0) self.assertAlmostEqual(synapseData.permanence, 1.0) # Now permanence should be at max tm.adaptSegment(0, set([0]), connections, tm.permanenceIncrement, tm.permanenceDecrement) synapseData = connections.dataForSynapse(0) self.assertAlmostEqual(synapseData.permanence, 1.0) def testAdaptSegmentToMin(self): tm = self.tm connections = tm.connections connections.createSegment(0) connections.createSynapse(0, 23, 0.1) tm.adaptSegment(0, set(), connections, tm.permanenceIncrement, tm.permanenceDecrement) synapses = connections.synapsesForSegment(0) self.assertFalse(0 in synapses) def testPickCellsToLearnOn(self): tm = TemporalMemory(seed=42) connections = tm.connections connections.createSegment(0) winnerCells = set([4, 47, 58, 93]) self.assertEqual(tm.pickCellsToLearnOn(2, 0, winnerCells, connections), set([4, 93])) # randomly picked self.assertEqual(tm.pickCellsToLearnOn(100, 0, winnerCells, connections), set([4, 47, 58, 93])) self.assertEqual(tm.pickCellsToLearnOn(0, 0, winnerCells, connections), set()) def testPickCellsToLearnOnAvoidDuplicates(self): tm = TemporalMemory(seed=42) connections = tm.connections connections.createSegment(0) connections.createSynapse(0, 23, 0.6) winnerCells = set([23]) # Ensure that no additional (duplicate) cells were picked self.assertEqual(tm.pickCellsToLearnOn(2, 0, winnerCells, connections), set()) def testColumnForCell1D(self): tm = TemporalMemory( columnDimensions=[2048], cellsPerColumn=5 ) self.assertEqual(tm.columnForCell(0), 0) self.assertEqual(tm.columnForCell(4), 0) self.assertEqual(tm.columnForCell(5), 1) self.assertEqual(tm.columnForCell(10239), 2047) def testColumnForCell2D(self): tm = TemporalMemory( columnDimensions=[64, 64], cellsPerColumn=4 ) self.assertEqual(tm.columnForCell(0), 0) self.assertEqual(tm.columnForCell(3), 0) self.assertEqual(tm.columnForCell(4), 1) self.assertEqual(tm.columnForCell(16383), 4095) def testColumnForCellInvalidCell(self): tm = TemporalMemory( columnDimensions=[64, 64], cellsPerColumn=4 ) try: tm.columnForCell(16383) except IndexError: self.fail("IndexError raised unexpectedly") args = [16384] 
self.assertRaises(IndexError, tm.columnForCell, *args) args = [-1] self.assertRaises(IndexError, tm.columnForCell, *args) def testCellsForColumn1D(self): tm = TemporalMemory( columnDimensions=[2048], cellsPerColumn=5 ) expectedCells = set([5, 6, 7, 8, 9]) self.assertEqual(tm.cellsForColumn(1), expectedCells) def testCellsForColumn2D(self): tm = TemporalMemory( columnDimensions=[64, 64], cellsPerColumn=4 ) expectedCells = set([256, 257, 258, 259]) self.assertEqual(tm.cellsForColumn(64), expectedCells) def testCellsForColumnInvalidColumn(self): tm = TemporalMemory( columnDimensions=[64, 64], cellsPerColumn=4 ) try: tm.cellsForColumn(4095) except IndexError: self.fail("IndexError raised unexpectedly") args = [4096] self.assertRaises(IndexError, tm.cellsForColumn, *args) args = [-1] self.assertRaises(IndexError, tm.cellsForColumn, *args) def testNumberOfColumns(self): tm = TemporalMemory( columnDimensions=[64, 64], cellsPerColumn=32 ) self.assertEqual(tm.numberOfColumns(), 64 * 64) def testNumberOfCells(self): tm = TemporalMemory( columnDimensions=[64, 64], cellsPerColumn=32 ) self.assertEqual(tm.numberOfCells(), 64 * 64 * 32) def testMapCellsToColumns(self): tm = TemporalMemory( columnDimensions=[100], cellsPerColumn=4 ) columnsForCells = tm.mapCellsToColumns(set([0, 1, 2, 5, 399])) self.assertEqual(columnsForCells[0], set([0, 1, 2])) self.assertEqual(columnsForCells[1], set([5])) self.assertEqual(columnsForCells[99], set([399])) @unittest.skipUnless( capnp, "pycapnp is not installed, skipping serialization test.") def testWriteRead(self): tm1 = TemporalMemory( columnDimensions=[100], cellsPerColumn=4, activationThreshold=7, initialPermanence=0.37, connectedPermanence=0.58, minThreshold=4, maxNewSynapseCount=18, permanenceIncrement=0.23, permanenceDecrement=0.08, seed=91 ) # Run some data through before serializing self.patternMachine = PatternMachine(100, 4) self.sequenceMachine = SequenceMachine(self.patternMachine) sequence = self.sequenceMachine.generateFromNumbers(range(5)) for _ in range(3): for pattern in sequence: tm1.compute(pattern) proto1 = TemporalMemoryProto_capnp.TemporalMemoryProto.new_message() tm1.write(proto1) # Write the proto to a temp file and read it back into a new proto with tempfile.TemporaryFile() as f: proto1.write(f) f.seek(0) proto2 = TemporalMemoryProto_capnp.TemporalMemoryProto.read(f) # Load the deserialized proto tm2 = TemporalMemory.read(proto2) # Check that the two temporal memory objects have the same attributes self.assertEqual(tm1, tm2) # Run a couple records through after deserializing and check results match tm1.compute(self.patternMachine.get(0)) tm2.compute(self.patternMachine.get(0)) self.assertEqual(tm1.activeCells, tm2.activeCells) self.assertEqual(tm1.predictiveCells, tm2.predictiveCells) self.assertEqual(tm1.winnerCells, tm2.winnerCells) self.assertEqual(tm1.connections, tm2.connections) tm1.compute(self.patternMachine.get(3)) tm2.compute(self.patternMachine.get(3)) self.assertEqual(tm1.activeCells, tm2.activeCells) self.assertEqual(tm1.predictiveCells, tm2.predictiveCells) self.assertEqual(tm1.winnerCells, tm2.winnerCells) self.assertEqual(tm1.connections, tm2.connections) if __name__ == '__main__': unittest.main()
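
# Illustrative sketch (not part of the original test module). The tests are
# normally run with a unittest runner, e.g. `python temporal_memory_test.py`.
# A minimal compute loop mirroring testWriteRead above (the dimensions are
# illustrative, not canonical):
#
#     tm = TemporalMemory(columnDimensions=[100], cellsPerColumn=4)
#     patterns = PatternMachine(100, 4)
#     sequence = SequenceMachine(patterns).generateFromNumbers(range(5))
#     for pattern in sequence:
#         tm.compute(pattern)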
agpl-3.0
-1,946,060,676,359,409,700
30.215827
95
0.633049
false
hradec/cortex
python/IECore/ClassLoader.py
12
10600
########################################################################## # # Copyright (c) 2007-2011, Image Engine Design Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # * Neither the name of Image Engine Design nor the names of any # other contributors to this software may be used to endorse or # promote products derived from this software without specific prior # written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ########################################################################## import os import imp import glob import re import os.path import threading from fnmatch import fnmatch from IECore import Msg, msg, SearchPath, warning ## This class defines methods for creating instances of classes # defined in python modules on disk. We could just use the standard # import mechanism for this but this gives us queries over what is # available and versioning and suchlike, and uses a different set # of searchpaths to the standard python paths. It's intended for # loading classes derived from Op, ParameterisedProcedural and similar # extension classes, and allows us to create small versioned units # of functionality for use all over the place - the ieCore "do" script # uses the ClassLoader to find operations it can perform for instance. # This class will find files with the following template path: # <any path>/<className>/<className>-<version>.py # Where [] represents optional field. # And for performance sake, it will not explore directories which # contain files that match this: # <any path>/<className>/<className>*.* class ClassLoader : ## Creates a ClassLoader which will load # classes found on the SearchPath object passed # in. def __init__( self, searchPaths ) : self.__searchPaths = searchPaths self.__defaultVersions = {} self.__loadMutex = threading.RLock() self.refresh() ## Returns a copy of the searchpath used to find classes. def searchPath( self ) : return SearchPath( self.__searchPaths ) ## Returns an alphabetically sorted list # of all the classes found # on the searchpaths. The optional matchString # performs glob style matching to narrow down # the set of names returned. def classNames( self, matchString = "*" ) : self.__findAllClasses() ### \todo Support re, and allow exclusions, etc... 
n = [ x for x in self.__classes.keys() if fnmatch( x, matchString ) ] n.sort() return n ## Returns the available versions of the specified # class as a list of ints, with the latest version last. # If the class doesn't exist returns an empty list. def versions( self, name ) : try : c = self.__findClass( name ) return c["versions"] except : return [] ## Sets the default version for the named class. # This is the version that is loaded if no version # is specified in the load() method. def setDefaultVersion( self, name, version ) : self.__validateVersion( version ) c = self.__findClass( name ) if not version in c["versions"] : raise RuntimeError( "Class \"%s\" has no version %d." % (name, version) ) self.__defaultVersions[name] = version ## Returns the default version for the named class. # This is the version that is loaded if no version # is specified in the load() method. If it has not # been set explicitly with setDefaultVersion() then # it defaults to the highest available version. def getDefaultVersion( self, name ) : c = self.__findClass( name ) v = self.__defaultVersions.get( name, c["versions"][-1] ) if not v in c["versions"] : msg( Msg.Level.Warning, "ClassLoader.getDefaultVersion", "Version %d doesn't exist, reverting to version %d." % ( v, c["versions"][-1] ) ) v = c["versions"][-1] self.__defaultVersions[name] = v return v ## Loads the specified version of the named class. # Version defaults to getDefaultVersion( name ) if # not specified. Note that this returns the actual class # object itself rather than an instance of that class. # It also adds two class attributes named "path" and "version" # with the info necessary to reload the Op from ClassLoader. def load( self, name, version = None ) : with self.__loadMutex: c = self.__findClass( name ) if not version : version = self.getDefaultVersion( name ) if not version in self.versions( name ) : raise RuntimeError( "Class \"%s\" has no version %d." % (name, version) ) if version in c["imports"] : return c["imports"][version] nameTail = os.path.basename( name ) fileName = os.path.join( name, nameTail + "-" + str(version) + ".py" ) fileName = self.__searchPaths.find( fileName ) if fileName=="" : raise IOError( "Unable to find implementation file for class \"%s\" version %d." % (name, version) ) fileForLoad = open( fileName, "r" ) try : module = imp.load_module( "IECoreClassLoader" + name.replace( ".", "_" ) + str( version ), fileForLoad, fileName, ( ".py", "r", imp.PY_SOURCE ) ) finally : fileForLoad.close() if not getattr( module, nameTail, None ) : raise IOError( "File \"%s\" does not define a class named \"%s\"." % ( fileName, nameTail ) ) result = getattr( module, nameTail ) if getattr( result, 'staticTypeName', None ) == getattr( result.__bases__[0], 'staticTypeName', None ) : warning( "Class \"%s\" has the same staticTypeName as its Base Class. Perhaps you should call registerRunTimeTyped." % name ) result.path = name result.version = version c["imports"][version] = result return result ## The ClassLoader uses a caching mechanism to speed # up frequent reloads of the same class. This method # can be used to force an update of the cache to # reflect changes on the filesystem. 
def refresh( self ) : # __classes is a dictionary mapping from a class name # to information for that class in the following form # { # "versions" : [], # a list containing all the available versions for that class # "imports" : {}, # a dictionary mapping from version numbers to the actual class definition # # this is filled in lazily by load() # } # this will be filled in lazily by __findClass and __findAllClasses self.__classes = {} self.__foundAllClasses = False __defaultLoaders = {} __defaultLoaderMutex = threading.Lock() ## Returns a ClassLoader configured to load from the paths defined by the # specified environment variable. The same object is returned each time, # allowing one loader to be shared by many callers. @classmethod def defaultLoader( cls, envVar ) : with cls.__defaultLoaderMutex: loader = cls.__defaultLoaders.get( envVar, None ) if loader : return loader sp = "" if envVar in os.environ : sp = os.environ[envVar] else : msg( Msg.Level.Warning, "ClassLoader.defaultLoader", "Environment variable %s not set." % envVar ) loader = cls( SearchPath( os.path.expandvars( sp ), ":" ) ) cls.__defaultLoaders[envVar] = loader return loader ## Returns a ClassLoader configured to load from the # paths defined by the IECORE_OP_PATHS environment variable. The # same object is returned each time, allowing one loader to be # shared by many callers. @classmethod def defaultOpLoader( cls ) : return cls.defaultLoader( "IECORE_OP_PATHS" ) ## Returns a ClassLoader configured to load from the # paths defined by the IECORE_PROCEDURAL_PATHS environment variable. The # same object is returned each time, allowing one loader to be # shared by many callers. @classmethod def defaultProceduralLoader( cls ) : return cls.defaultLoader( "IECORE_PROCEDURAL_PATHS" ) def __updateClassFromSearchPath( self, searchPath, name ) : pattern = re.compile( ".*-(\d+).py$" ) pruneDir = False nameTail = os.path.split( name )[-1] # globbing for any extension rather than .py to avoid exploring shader # directories without Python files. Function returns true on those cases. gf = glob.glob( os.path.join( searchPath, name, nameTail + "*.*" ) ) for f in gf : pruneDir = True m = re.match( pattern, f ) try : version = int( m.group( 1 ) ) except : continue c = self.__classes.setdefault( name, { "versions" : [], "imports" : {} } ) if not version in c["versions"]: c["versions"].append( version ) c["versions"].sort() return pruneDir def __findClass( self, name ) : if not name in self.__classes and not self.__foundAllClasses : for path in self.__searchPaths.paths : self.__updateClassFromSearchPath( path, name ) if name in self.__classes : return self.__classes[name] else : raise RuntimeError( "Class \"%s\" doesn't exist." % name ) def __findAllClasses( self ) : if self.__foundAllClasses : return self.__classes = {} for path in self.__searchPaths.paths : for root, dirs, files in os.walk( path ) : if path.endswith( '/' ) : nameBase = root[len(path):] else : nameBase = root[len(path)+1:] dirsToPrune = set() for d in dirs : if self.__updateClassFromSearchPath( path, os.path.join( nameBase, d ) ) : dirsToPrune.add( d ) for d in dirsToPrune : dirs.remove( d ) self.__foundAllClasses = True # throws an exception if the version is no good @staticmethod def __validateVersion( version ) : if not type( version ) is int : raise TypeError( "Version must be an integer" )
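
# Illustrative usage sketch (not part of the original module). The class name
# below is a placeholder; real names are the directory paths found under the
# IECORE_OP_PATHS search path:
#
#     loader = ClassLoader.defaultOpLoader()
#     print(loader.classNames("*"))             # all classes on the path
#     print(loader.versions("maths/multiply"))  # available versions, if any
#     opClass = loader.load("maths/multiply")   # default (latest) version
#     op = opClass()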
bsd-3-clause
-2,364,115,616,674,663,000
33.083601
149
0.686887
false
vileopratama/vitech
src/addons/account/wizard/account_financial_report.py
43
2809
# -*- coding: utf-8 -*- from openerp import api, fields, models class AccountingReport(models.TransientModel): _name = "accounting.report" _inherit = "account.common.report" _description = "Accounting Report" @api.model def _get_account_report(self): reports = [] if self._context.get('active_id'): menu = self.env['ir.ui.menu'].browse(self._context.get('active_id')).name reports = self.env['account.financial.report'].search([('name', 'ilike', menu)]) return reports and reports[0] or False enable_filter = fields.Boolean(string='Enable Comparison') account_report_id = fields.Many2one('account.financial.report', string='Account Reports', required=True, default=_get_account_report) label_filter = fields.Char(string='Column Label', help="This label will be displayed on report to show the balance computed for the given comparison filter.") filter_cmp = fields.Selection([('filter_no', 'No Filters'), ('filter_date', 'Date')], string='Filter by', required=True, default='filter_no') date_from_cmp = fields.Date(string='Start Date') date_to_cmp = fields.Date(string='End Date') debit_credit = fields.Boolean(string='Display Debit/Credit Columns', help="This option allows you to get more details about the way your balances are computed. Because it is space consuming, we do not allow to use it while doing a comparison.") def _build_comparison_context(self, data): result = {} result['journal_ids'] = 'journal_ids' in data['form'] and data['form']['journal_ids'] or False result['state'] = 'target_move' in data['form'] and data['form']['target_move'] or '' if data['form']['filter_cmp'] == 'filter_date': result['date_from'] = data['form']['date_from_cmp'] result['date_to'] = data['form']['date_to_cmp'] result['strict_range'] = True return result @api.multi def check_report(self): res = super(AccountingReport, self).check_report() data = {} data['form'] = self.read(['account_report_id', 'date_from_cmp', 'date_to_cmp', 'journal_ids', 'filter_cmp', 'target_move'])[0] for field in ['account_report_id']: if isinstance(data['form'][field], tuple): data['form'][field] = data['form'][field][0] comparison_context = self._build_comparison_context(data) res['data']['form']['comparison_context'] = comparison_context return res def _print_report(self, data): data['form'].update(self.read(['date_from_cmp', 'debit_credit', 'date_to_cmp', 'filter_cmp', 'account_report_id', 'enable_filter', 'label_filter', 'target_move'])[0]) return self.env['report'].get_action(self, 'account.report_financial', data=data)
mit
-8,505,433,429,684,724,000
54.078431
248
0.640441
false
ctdegroot/OpenFOAMTools
modules/FreeSurfaceCalculator.py
1
4388
import glob import numpy as np import os class FreeSurfaceCalculator(object): """Class to calculate the level of a free surface as a function of time. This assumes that sampled data is organized such that there is one folder per timestep, containing a single .xy file. The .xy file should contain two columns, the first of which contains a coordinate value representative of the "height" of the free surface and the second contains values of the volume fraction at those locations. The free surface height is calculated by interpolating from the .xy data the location where the volume fraction is equal to 0.5. Sample code that could be used in an OpenFOAM controlDict file to generate the data files is: functions ( elevation { type sets; functionObjectLibs ("libsampling.so"); outputControl timeStep; interpolationScheme cellPointFace; setFormat raw; fields ( alpha1 ); sets ( lineX1 { type uniform; axis distance; start (6.0 0 0.0005); end (6.0 0.5 0.0005); nPoints 500; } ); } ) Current limitations of this code, that could be addressed in the future are: - There must only be one .xy file in each directory. This could be extended to allow the free surface height to be calculated at multiple locations. - If there are multiple free surface locations (i.e. more than one location where the volume fraction equals 0.5) only the first to be encountered will be logged. This could be extended to look for multiple free surfaces which would be relevant in cases where there is entrapment of one phase inside of the other. """ def __init__(self, parentDir='.'): """Class initialization. Input parameters: parentDir : top level directory containing subdirectories for each timestep; defaults to current directory. """ self._parentDir = parentDir self._data = [] def _calculate(self): """Calculates the free surface height for each timestep.""" # Get all of the directories containing data; results are filtered to # only starting with a number to avoid getting potential hidden files/ # directories or other extraneous results dirs = glob.glob(os.path.join(self._parentDir, '[0-9]*')) # Loop through all of the directories; assume only one file is in each for dir in dirs: files = os.listdir(dir) # There should only be one .xy file, but this is necessary in case # there are hidden files fileName = None for file in files: if file.endswith('.xy') and not file.startswith('.'): fileName = file break # Load the xy data and assign to variables data = np.loadtxt(os.path.join(dir, fileName)) y = data[:,0] alpha = data[:,1] # Find the free surface location for i in range(alpha.size): if alpha[i] > 0.5 and alpha[i+1] < 0.5: break time = os.path.basename(dir) ys = np.interp(0.5, [alpha[i], alpha[i+1]], [y[i], y[i+1]]) # Save the data self._data.append([time, ys]) def writeCsvOutput(self, outputCsvFile): """Writes the free surface location as a function of time to csv file. Input parameters: outputCsvFile : file name where output will be written; if extension .csv is not present it will be added. """ self._calculate() if not outputCsvFile[-4:] == '.csv': outputCsvFile += '.csv' output = open(outputCsvFile, 'w') for row in self._data: output.write('{}, {}\n'.format(*row))
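
# Illustrative usage sketch (not part of the original module). The directory
# below is a placeholder for OpenFOAM "sets" output laid out as described in
# the class docstring (one sub-directory per timestep, one .xy file each):
#
#     calc = FreeSurfaceCalculator(parentDir='postProcessing/elevation')
#     calc.writeCsvOutput('free_surface_height.csv')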
mit
4,544,724,996,992,386,000
35.272727
80
0.544895
false
sftd/scons
scons-local/SCons/Util.py
7
49072
"""SCons.Util Various utility functions go here. """ # # Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 The SCons Foundation # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. __revision__ = "src/engine/SCons/Util.py 2014/03/02 14:18:15 garyo" import os import sys import copy import re import types from collections import UserDict, UserList, UserString # Don't "from types import ..." these because we need to get at the # types module later to look for UnicodeType. InstanceType = types.InstanceType MethodType = types.MethodType FunctionType = types.FunctionType try: unicode except NameError: UnicodeType = None else: UnicodeType = unicode def dictify(keys, values, result={}): for k, v in zip(keys, values): result[k] = v return result _altsep = os.altsep if _altsep is None and sys.platform == 'win32': # My ActivePython 2.0.1 doesn't set os.altsep! What gives? _altsep = '/' if _altsep: def rightmost_separator(path, sep): return max(path.rfind(sep), path.rfind(_altsep)) else: def rightmost_separator(path, sep): return path.rfind(sep) # First two from the Python Cookbook, just for completeness. # (Yeah, yeah, YAGNI...) def containsAny(str, set): """Check whether sequence str contains ANY of the items in set.""" for c in set: if c in str: return 1 return 0 def containsAll(str, set): """Check whether sequence str contains ALL of the items in set.""" for c in set: if c not in str: return 0 return 1 def containsOnly(str, set): """Check whether sequence str contains ONLY items in set.""" for c in str: if c not in set: return 0 return 1 def splitext(path): "Same as os.path.splitext() but faster." sep = rightmost_separator(path, os.sep) dot = path.rfind('.') # An ext is only real if it has at least one non-digit char if dot > sep and not containsOnly(path[dot:], "0123456789."): return path[:dot],path[dot:] else: return path,"" def updrive(path): """ Make the drive letter (if any) upper case. This is useful because Windows is inconsitent on the case of the drive letter, which can cause inconsistencies when calculating command signatures. """ drive, rest = os.path.splitdrive(path) if drive: path = drive.upper() + rest return path class NodeList(UserList): """This class is almost exactly like a regular list of Nodes (actually it can hold any object), with one important difference. If you try to get an attribute from this list, it will return that attribute from every item in the list. 
For example: >>> someList = NodeList([ ' foo ', ' bar ' ]) >>> someList.strip() [ 'foo', 'bar' ] """ def __nonzero__(self): return len(self.data) != 0 def __str__(self): return ' '.join(map(str, self.data)) def __iter__(self): return iter(self.data) def __call__(self, *args, **kwargs): result = [x(*args, **kwargs) for x in self.data] return self.__class__(result) def __getattr__(self, name): result = [getattr(x, name) for x in self.data] return self.__class__(result) _get_env_var = re.compile(r'^\$([_a-zA-Z]\w*|{[_a-zA-Z]\w*})$') def get_environment_var(varstr): """Given a string, first determine if it looks like a reference to a single environment variable, like "$FOO" or "${FOO}". If so, return that variable with no decorations ("FOO"). If not, return None.""" mo=_get_env_var.match(to_String(varstr)) if mo: var = mo.group(1) if var[0] == '{': return var[1:-1] else: return var else: return None class DisplayEngine(object): print_it = True def __call__(self, text, append_newline=1): if not self.print_it: return if append_newline: text = text + '\n' try: sys.stdout.write(unicode(text)) except IOError: # Stdout might be connected to a pipe that has been closed # by now. The most likely reason for the pipe being closed # is that the user has press ctrl-c. It this is the case, # then SCons is currently shutdown. We therefore ignore # IOError's here so that SCons can continue and shutdown # properly so that the .sconsign is correctly written # before SCons exits. pass def set_mode(self, mode): self.print_it = mode def render_tree(root, child_func, prune=0, margin=[0], visited={}): """ Render a tree of nodes into an ASCII tree view. root - the root node of the tree child_func - the function called to get the children of a node prune - don't visit the same node twice margin - the format of the left margin to use for children of root. 1 results in a pipe, and 0 results in no pipe. visited - a dictionary of visited nodes in the current branch if not prune, or in the whole tree if prune. """ rname = str(root) children = child_func(root) retval = "" for pipe in margin[:-1]: if pipe: retval = retval + "| " else: retval = retval + " " if rname in visited: return retval + "+-[" + rname + "]\n" retval = retval + "+-" + rname + "\n" if not prune: visited = copy.copy(visited) visited[rname] = 1 for i in range(len(children)): margin.append(i<len(children)-1) retval = retval + render_tree(children[i], child_func, prune, margin, visited ) margin.pop() return retval IDX = lambda N: N and 1 or 0 def print_tree(root, child_func, prune=0, showtags=0, margin=[0], visited={}): """ Print a tree of nodes. This is like render_tree, except it prints lines directly instead of creating a string representation in memory, so that huge trees can be printed. root - the root node of the tree child_func - the function called to get the children of a node prune - don't visit the same node twice showtags - print status information to the left of each node line margin - the format of the left margin to use for children of root. 1 results in a pipe, and 0 results in no pipe. visited - a dictionary of visited nodes in the current branch if not prune, or in the whole tree if prune. 
""" rname = str(root) if showtags: if showtags == 2: legend = (' E = exists\n' + ' R = exists in repository only\n' + ' b = implicit builder\n' + ' B = explicit builder\n' + ' S = side effect\n' + ' P = precious\n' + ' A = always build\n' + ' C = current\n' + ' N = no clean\n' + ' H = no cache\n' + '\n') sys.stdout.write(unicode(legend)) tags = ['['] tags.append(' E'[IDX(root.exists())]) tags.append(' R'[IDX(root.rexists() and not root.exists())]) tags.append(' BbB'[[0,1][IDX(root.has_explicit_builder())] + [0,2][IDX(root.has_builder())]]) tags.append(' S'[IDX(root.side_effect)]) tags.append(' P'[IDX(root.precious)]) tags.append(' A'[IDX(root.always_build)]) tags.append(' C'[IDX(root.is_up_to_date())]) tags.append(' N'[IDX(root.noclean)]) tags.append(' H'[IDX(root.nocache)]) tags.append(']') else: tags = [] def MMM(m): return [" ","| "][m] margins = list(map(MMM, margin[:-1])) children = child_func(root) if prune and rname in visited and children: sys.stdout.write(''.join(tags + margins + ['+-[', rname, ']']) + u'\n') return sys.stdout.write(''.join(tags + margins + ['+-', rname]) + u'\n') visited[rname] = 1 if children: margin.append(1) idx = IDX(showtags) for C in children[:-1]: print_tree(C, child_func, prune, idx, margin, visited) margin[-1] = 0 print_tree(children[-1], child_func, prune, idx, margin, visited) margin.pop() # Functions for deciding if things are like various types, mainly to # handle UserDict, UserList and UserString like their underlying types. # # Yes, all of this manual testing breaks polymorphism, and the real # Pythonic way to do all of this would be to just try it and handle the # exception, but handling the exception when it's not the right type is # often too slow. # We are using the following trick to speed up these # functions. Default arguments are used to take a snapshot of the # the global functions and constants used by these functions. This # transforms accesses to global variable into local variables # accesses (i.e. LOAD_FAST instead of LOAD_GLOBAL). DictTypes = (dict, UserDict) ListTypes = (list, UserList) SequenceTypes = (list, tuple, UserList) # Note that profiling data shows a speed-up when comparing # explicitely with str and unicode instead of simply comparing # with basestring. (at least on Python 2.5.1) StringTypes = (str, unicode, UserString) # Empirically, it is faster to check explicitely for str and # unicode than for basestring. BaseStringTypes = (str, unicode) def is_Dict(obj, isinstance=isinstance, DictTypes=DictTypes): return isinstance(obj, DictTypes) def is_List(obj, isinstance=isinstance, ListTypes=ListTypes): return isinstance(obj, ListTypes) def is_Sequence(obj, isinstance=isinstance, SequenceTypes=SequenceTypes): return isinstance(obj, SequenceTypes) def is_Tuple(obj, isinstance=isinstance, tuple=tuple): return isinstance(obj, tuple) def is_String(obj, isinstance=isinstance, StringTypes=StringTypes): return isinstance(obj, StringTypes) def is_Scalar(obj, isinstance=isinstance, StringTypes=StringTypes, SequenceTypes=SequenceTypes): # Profiling shows that there is an impressive speed-up of 2x # when explicitely checking for strings instead of just not # sequence when the argument (i.e. obj) is already a string. # But, if obj is a not string then it is twice as fast to # check only for 'not sequence'. The following code therefore # assumes that the obj argument is a string must of the time. 
return isinstance(obj, StringTypes) or not isinstance(obj, SequenceTypes) def do_flatten(sequence, result, isinstance=isinstance, StringTypes=StringTypes, SequenceTypes=SequenceTypes): for item in sequence: if isinstance(item, StringTypes) or not isinstance(item, SequenceTypes): result.append(item) else: do_flatten(item, result) def flatten(obj, isinstance=isinstance, StringTypes=StringTypes, SequenceTypes=SequenceTypes, do_flatten=do_flatten): """Flatten a sequence to a non-nested list. Flatten() converts either a single scalar or a nested sequence to a non-nested list. Note that flatten() considers strings to be scalars instead of sequences like Python would. """ if isinstance(obj, StringTypes) or not isinstance(obj, SequenceTypes): return [obj] result = [] for item in obj: if isinstance(item, StringTypes) or not isinstance(item, SequenceTypes): result.append(item) else: do_flatten(item, result) return result def flatten_sequence(sequence, isinstance=isinstance, StringTypes=StringTypes, SequenceTypes=SequenceTypes, do_flatten=do_flatten): """Flatten a sequence to a non-nested list. Same as flatten(), but it does not handle the single scalar case. This is slightly more efficient when one knows that the sequence to flatten can not be a scalar. """ result = [] for item in sequence: if isinstance(item, StringTypes) or not isinstance(item, SequenceTypes): result.append(item) else: do_flatten(item, result) return result # Generic convert-to-string functions that abstract away whether or # not the Python we're executing has Unicode support. The wrapper # to_String_for_signature() will use a for_signature() method if the # specified object has one. # def to_String(s, isinstance=isinstance, str=str, UserString=UserString, BaseStringTypes=BaseStringTypes): if isinstance(s,BaseStringTypes): # Early out when already a string! return s elif isinstance(s, UserString): # s.data can only be either a unicode or a regular # string. Please see the UserString initializer. return s.data else: return str(s) def to_String_for_subst(s, isinstance=isinstance, str=str, to_String=to_String, BaseStringTypes=BaseStringTypes, SequenceTypes=SequenceTypes, UserString=UserString): # Note that the test cases are sorted by order of probability. if isinstance(s, BaseStringTypes): return s elif isinstance(s, SequenceTypes): l = [] for e in s: l.append(to_String_for_subst(e)) return ' '.join( s ) elif isinstance(s, UserString): # s.data can only be either a unicode or a regular # string. Please see the UserString initializer. return s.data else: return str(s) def to_String_for_signature(obj, to_String_for_subst=to_String_for_subst, AttributeError=AttributeError): try: f = obj.for_signature except AttributeError: return to_String_for_subst(obj) else: return f() # The SCons "semi-deep" copy. # # This makes separate copies of lists (including UserList objects) # dictionaries (including UserDict objects) and tuples, but just copies # references to anything else it finds. # # A special case is any object that has a __semi_deepcopy__() method, # which we invoke to create the copy. Currently only used by # BuilderDict to actually prevent the copy operation (as invalid on that object) # # The dispatch table approach used here is a direct rip-off from the # normal Python copy module. 
_semi_deepcopy_dispatch = d = {} def semi_deepcopy_dict(x, exclude = [] ): copy = {} for key, val in x.items(): # The regular Python copy.deepcopy() also deepcopies the key, # as follows: # # copy[semi_deepcopy(key)] = semi_deepcopy(val) # # Doesn't seem like we need to, but we'll comment it just in case. if key not in exclude: copy[key] = semi_deepcopy(val) return copy d[dict] = semi_deepcopy_dict def _semi_deepcopy_list(x): return list(map(semi_deepcopy, x)) d[list] = _semi_deepcopy_list def _semi_deepcopy_tuple(x): return tuple(map(semi_deepcopy, x)) d[tuple] = _semi_deepcopy_tuple def semi_deepcopy(x): copier = _semi_deepcopy_dispatch.get(type(x)) if copier: return copier(x) else: if hasattr(x, '__semi_deepcopy__') and callable(x.__semi_deepcopy__): return x.__semi_deepcopy__() elif isinstance(x, UserDict): return x.__class__(semi_deepcopy_dict(x)) elif isinstance(x, UserList): return x.__class__(_semi_deepcopy_list(x)) return x class Proxy(object): """A simple generic Proxy class, forwarding all calls to subject. So, for the benefit of the python newbie, what does this really mean? Well, it means that you can take an object, let's call it 'objA', and wrap it in this Proxy class, with a statement like this proxyObj = Proxy(objA), Then, if in the future, you do something like this x = proxyObj.var1, since Proxy does not have a 'var1' attribute (but presumably objA does), the request actually is equivalent to saying x = objA.var1 Inherit from this class to create a Proxy. Note that, with new-style classes, this does *not* work transparently for Proxy subclasses that use special .__*__() method names, because those names are now bound to the class, not the individual instances. You now need to know in advance which .__*__() method names you want to pass on to the underlying Proxy object, and specifically delegate their calls like this: class Foo(Proxy): __str__ = Delegate('__str__') """ def __init__(self, subject): """Wrap an object as a Proxy object""" self._subject = subject def __getattr__(self, name): """Retrieve an attribute from the wrapped object. If the named attribute doesn't exist, AttributeError is raised""" return getattr(self._subject, name) def get(self): """Retrieve the entire wrapped object""" return self._subject def __cmp__(self, other): if issubclass(other.__class__, self._subject.__class__): return cmp(self._subject, other) return cmp(self.__dict__, other.__dict__) class Delegate(object): """A Python Descriptor class that delegates attribute fetches to an underlying wrapped subject of a Proxy. 
Typical use: class Foo(Proxy): __str__ = Delegate('__str__') """ def __init__(self, attribute): self.attribute = attribute def __get__(self, obj, cls): if isinstance(obj, cls): return getattr(obj._subject, self.attribute) else: return self # attempt to load the windows registry module: can_read_reg = 0 try: import winreg can_read_reg = 1 hkey_mod = winreg RegOpenKeyEx = winreg.OpenKeyEx RegEnumKey = winreg.EnumKey RegEnumValue = winreg.EnumValue RegQueryValueEx = winreg.QueryValueEx RegError = winreg.error except ImportError: try: import win32api import win32con can_read_reg = 1 hkey_mod = win32con RegOpenKeyEx = win32api.RegOpenKeyEx RegEnumKey = win32api.RegEnumKey RegEnumValue = win32api.RegEnumValue RegQueryValueEx = win32api.RegQueryValueEx RegError = win32api.error except ImportError: class _NoError(Exception): pass RegError = _NoError if can_read_reg: HKEY_CLASSES_ROOT = hkey_mod.HKEY_CLASSES_ROOT HKEY_LOCAL_MACHINE = hkey_mod.HKEY_LOCAL_MACHINE HKEY_CURRENT_USER = hkey_mod.HKEY_CURRENT_USER HKEY_USERS = hkey_mod.HKEY_USERS def RegGetValue(root, key): """This utility function returns a value in the registry without having to open the key first. Only available on Windows platforms with a version of Python that can read the registry. Returns the same thing as SCons.Util.RegQueryValueEx, except you just specify the entire path to the value, and don't have to bother opening the key first. So: Instead of: k = SCons.Util.RegOpenKeyEx(SCons.Util.HKEY_LOCAL_MACHINE, r'SOFTWARE\Microsoft\Windows\CurrentVersion') out = SCons.Util.RegQueryValueEx(k, 'ProgramFilesDir') You can write: out = SCons.Util.RegGetValue(SCons.Util.HKEY_LOCAL_MACHINE, r'SOFTWARE\Microsoft\Windows\CurrentVersion\ProgramFilesDir') """ # I would use os.path.split here, but it's not a filesystem # path... p = key.rfind('\\') + 1 keyp = key[:p-1] # -1 to omit trailing slash val = key[p:] k = RegOpenKeyEx(root, keyp) return RegQueryValueEx(k,val) else: try: e = WindowsError except NameError: # Make sure we have a definition of WindowsError so we can # run platform-independent tests of Windows functionality on # platforms other than Windows. (WindowsError is, in fact, an # OSError subclass on Windows.) 
class WindowsError(OSError): pass import builtins builtins.WindowsError = WindowsError else: del e HKEY_CLASSES_ROOT = None HKEY_LOCAL_MACHINE = None HKEY_CURRENT_USER = None HKEY_USERS = None def RegGetValue(root, key): raise WindowsError def RegOpenKeyEx(root, key): raise WindowsError if sys.platform == 'win32': def WhereIs(file, path=None, pathext=None, reject=[]): if path is None: try: path = os.environ['PATH'] except KeyError: return None if is_String(path): path = path.split(os.pathsep) if pathext is None: try: pathext = os.environ['PATHEXT'] except KeyError: pathext = '.COM;.EXE;.BAT;.CMD' if is_String(pathext): pathext = pathext.split(os.pathsep) for ext in pathext: if ext.lower() == file[-len(ext):].lower(): pathext = [''] break if not is_List(reject) and not is_Tuple(reject): reject = [reject] for dir in path: f = os.path.join(dir, file) for ext in pathext: fext = f + ext if os.path.isfile(fext): try: reject.index(fext) except ValueError: return os.path.normpath(fext) continue return None elif os.name == 'os2': def WhereIs(file, path=None, pathext=None, reject=[]): if path is None: try: path = os.environ['PATH'] except KeyError: return None if is_String(path): path = path.split(os.pathsep) if pathext is None: pathext = ['.exe', '.cmd'] for ext in pathext: if ext.lower() == file[-len(ext):].lower(): pathext = [''] break if not is_List(reject) and not is_Tuple(reject): reject = [reject] for dir in path: f = os.path.join(dir, file) for ext in pathext: fext = f + ext if os.path.isfile(fext): try: reject.index(fext) except ValueError: return os.path.normpath(fext) continue return None else: def WhereIs(file, path=None, pathext=None, reject=[]): import stat if path is None: try: path = os.environ['PATH'] except KeyError: return None if is_String(path): path = path.split(os.pathsep) if not is_List(reject) and not is_Tuple(reject): reject = [reject] for d in path: f = os.path.join(d, file) if os.path.isfile(f): try: st = os.stat(f) except OSError: # os.stat() raises OSError, not IOError if the file # doesn't exist, so in this case we let IOError get # raised so as to not mask possibly serious disk or # network issues. continue if stat.S_IMODE(st[stat.ST_MODE]) & 0111: try: reject.index(f) except ValueError: return os.path.normpath(f) continue return None def PrependPath(oldpath, newpath, sep = os.pathsep, delete_existing=1, canonicalize=None): """This prepends newpath elements to the given oldpath. Will only add any particular path once (leaving the first one it encounters and ignoring the rest, to preserve path order), and will os.path.normpath and os.path.normcase all paths to help assure this. This can also handle the case where the given old path variable is a list instead of a string, in which case a list will be returned instead of a string. Example: Old Path: "/foo/bar:/foo" New Path: "/biz/boom:/foo" Result: "/biz/boom:/foo:/foo/bar" If delete_existing is 0, then adding a path that exists will not move it to the beginning; it will stay where it is in the list. If canonicalize is not None, it is applied to each element of newpath before use. 
""" orig = oldpath is_list = 1 paths = orig if not is_List(orig) and not is_Tuple(orig): paths = paths.split(sep) is_list = 0 if is_String(newpath): newpaths = newpath.split(sep) elif not is_List(newpath) and not is_Tuple(newpath): newpaths = [ newpath ] # might be a Dir else: newpaths = newpath if canonicalize: newpaths=list(map(canonicalize, newpaths)) if not delete_existing: # First uniquify the old paths, making sure to # preserve the first instance (in Unix/Linux, # the first one wins), and remembering them in normpaths. # Then insert the new paths at the head of the list # if they're not already in the normpaths list. result = [] normpaths = [] for path in paths: if not path: continue normpath = os.path.normpath(os.path.normcase(path)) if normpath not in normpaths: result.append(path) normpaths.append(normpath) newpaths.reverse() # since we're inserting at the head for path in newpaths: if not path: continue normpath = os.path.normpath(os.path.normcase(path)) if normpath not in normpaths: result.insert(0, path) normpaths.append(normpath) paths = result else: newpaths = newpaths + paths # prepend new paths normpaths = [] paths = [] # now we add them only if they are unique for path in newpaths: normpath = os.path.normpath(os.path.normcase(path)) if path and not normpath in normpaths: paths.append(path) normpaths.append(normpath) if is_list: return paths else: return sep.join(paths) def AppendPath(oldpath, newpath, sep = os.pathsep, delete_existing=1, canonicalize=None): """This appends new path elements to the given old path. Will only add any particular path once (leaving the last one it encounters and ignoring the rest, to preserve path order), and will os.path.normpath and os.path.normcase all paths to help assure this. This can also handle the case where the given old path variable is a list instead of a string, in which case a list will be returned instead of a string. Example: Old Path: "/foo/bar:/foo" New Path: "/biz/boom:/foo" Result: "/foo/bar:/biz/boom:/foo" If delete_existing is 0, then adding a path that exists will not move it to the end; it will stay where it is in the list. If canonicalize is not None, it is applied to each element of newpath before use. """ orig = oldpath is_list = 1 paths = orig if not is_List(orig) and not is_Tuple(orig): paths = paths.split(sep) is_list = 0 if is_String(newpath): newpaths = newpath.split(sep) elif not is_List(newpath) and not is_Tuple(newpath): newpaths = [ newpath ] # might be a Dir else: newpaths = newpath if canonicalize: newpaths=list(map(canonicalize, newpaths)) if not delete_existing: # add old paths to result, then # add new paths if not already present # (I thought about using a dict for normpaths for speed, # but it's not clear hashing the strings would be faster # than linear searching these typically short lists.) result = [] normpaths = [] for path in paths: if not path: continue result.append(path) normpaths.append(os.path.normpath(os.path.normcase(path))) for path in newpaths: if not path: continue normpath = os.path.normpath(os.path.normcase(path)) if normpath not in normpaths: result.append(path) normpaths.append(normpath) paths = result else: # start w/ new paths, add old ones if not present, # then reverse. 
newpaths = paths + newpaths # append new paths newpaths.reverse() normpaths = [] paths = [] # now we add them only if they are unique for path in newpaths: normpath = os.path.normpath(os.path.normcase(path)) if path and not normpath in normpaths: paths.append(path) normpaths.append(normpath) paths.reverse() if is_list: return paths else: return sep.join(paths) if sys.platform == 'cygwin': def get_native_path(path): """Transforms an absolute path into a native path for the system. In Cygwin, this converts from a Cygwin path to a Windows one.""" return os.popen('cygpath -w ' + path).read().replace('\n', '') else: def get_native_path(path): """Transforms an absolute path into a native path for the system. Non-Cygwin version, just leave the path alone.""" return path display = DisplayEngine() def Split(arg): if is_List(arg) or is_Tuple(arg): return arg elif is_String(arg): return arg.split() else: return [arg] class CLVar(UserList): """A class for command-line construction variables. This is a list that uses Split() to split an initial string along white-space arguments, and similarly to split any strings that get added. This allows us to Do the Right Thing with Append() and Prepend() (as well as straight Python foo = env['VAR'] + 'arg1 arg2') regardless of whether a user adds a list or a string to a command-line construction variable. """ def __init__(self, seq = []): UserList.__init__(self, Split(seq)) def __add__(self, other): return UserList.__add__(self, CLVar(other)) def __radd__(self, other): return UserList.__radd__(self, CLVar(other)) def __coerce__(self, other): return (self, CLVar(other)) def __str__(self): return ' '.join(self.data) # A dictionary that preserves the order in which items are added. # Submitted by David Benjamin to ActiveState's Python Cookbook web site: # http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/107747 # Including fixes/enhancements from the follow-on discussions. class OrderedDict(UserDict): def __init__(self, dict = None): self._keys = [] UserDict.__init__(self, dict) def __delitem__(self, key): UserDict.__delitem__(self, key) self._keys.remove(key) def __setitem__(self, key, item): UserDict.__setitem__(self, key, item) if key not in self._keys: self._keys.append(key) def clear(self): UserDict.clear(self) self._keys = [] def copy(self): dict = OrderedDict() dict.update(self) return dict def items(self): return list(zip(self._keys, list(self.values()))) def keys(self): return self._keys[:] def popitem(self): try: key = self._keys[-1] except IndexError: raise KeyError('dictionary is empty') val = self[key] del self[key] return (key, val) def setdefault(self, key, failobj = None): UserDict.setdefault(self, key, failobj) if key not in self._keys: self._keys.append(key) def update(self, dict): for (key, val) in dict.items(): self.__setitem__(key, val) def values(self): return list(map(self.get, self._keys)) class Selector(OrderedDict): """A callable ordered dictionary that maps file suffixes to dictionary values. We preserve the order in which items are added so that get_suffix() calls always return the first suffix added.""" def __call__(self, env, source, ext=None): if ext is None: try: ext = source[0].suffix except IndexError: ext = "" try: return self[ext] except KeyError: # Try to perform Environment substitution on the keys of # the dictionary before giving up. s_dict = {} for (k,v) in self.items(): if k is not None: s_k = env.subst(k) if s_k in s_dict: # We only raise an error when variables point # to the same suffix. 
If one suffix is literal # and a variable suffix contains this literal, # the literal wins and we don't raise an error. raise KeyError(s_dict[s_k][0], k, s_k) s_dict[s_k] = (k,v) try: return s_dict[ext][1] except KeyError: try: return self[None] except KeyError: return None if sys.platform == 'cygwin': # On Cygwin, os.path.normcase() lies, so just report back the # fact that the underlying Windows OS is case-insensitive. def case_sensitive_suffixes(s1, s2): return 0 else: def case_sensitive_suffixes(s1, s2): return (os.path.normcase(s1) != os.path.normcase(s2)) def adjustixes(fname, pre, suf, ensure_suffix=False): if pre: path, fn = os.path.split(os.path.normpath(fname)) if fn[:len(pre)] != pre: fname = os.path.join(path, pre + fn) # Only append a suffix if the suffix we're going to add isn't already # there, and if either we've been asked to ensure the specific suffix # is present or there's no suffix on it at all. if suf and fname[-len(suf):] != suf and \ (ensure_suffix or not splitext(fname)[1]): fname = fname + suf return fname # From Tim Peters, # http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52560 # ASPN: Python Cookbook: Remove duplicates from a sequence # (Also in the printed Python Cookbook.) def unique(s): """Return a list of the elements in s, but without duplicates. For example, unique([1,2,3,1,2,3]) is some permutation of [1,2,3], unique("abcabc") some permutation of ["a", "b", "c"], and unique(([1, 2], [2, 3], [1, 2])) some permutation of [[2, 3], [1, 2]]. For best speed, all sequence elements should be hashable. Then unique() will usually work in linear time. If not possible, the sequence elements should enjoy a total ordering, and if list(s).sort() doesn't raise TypeError it's assumed that they do enjoy a total ordering. Then unique() will usually work in O(N*log2(N)) time. If that's not possible either, the sequence elements must support equality-testing. Then unique() will usually work in quadratic time. """ n = len(s) if n == 0: return [] # Try using a dict first, as that's the fastest and will usually # work. If it doesn't work, it will usually fail quickly, so it # usually doesn't cost much to *try* it. It requires that all the # sequence elements be hashable, and support equality comparison. u = {} try: for x in s: u[x] = 1 except TypeError: pass # move on to the next method else: return list(u.keys()) del u # We can't hash all the elements. Second fastest is to sort, # which brings the equal elements together; then duplicates are # easy to weed out in a single pass. # NOTE: Python's list.sort() was designed to be efficient in the # presence of many duplicate elements. This isn't true of all # sort functions in all languages or libraries, so this approach # is more effective in Python than it may be elsewhere. try: t = sorted(s) except TypeError: pass # move on to the next method else: assert n > 0 last = t[0] lasti = i = 1 while i < n: if t[i] != last: t[lasti] = last = t[i] lasti = lasti + 1 i = i + 1 return t[:lasti] del t # Brute force is all that's left. u = [] for x in s: if x not in u: u.append(x) return u # From Alex Martelli, # http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52560 # ASPN: Python Cookbook: Remove duplicates from a sequence # First comment, dated 2001/10/13. # (Also in the printed Python Cookbook.) 
def uniquer(seq, idfun=None): if idfun is None: def idfun(x): return x seen = {} result = [] for item in seq: marker = idfun(item) # in old Python versions: # if seen.has_key(marker) # but in new ones: if marker in seen: continue seen[marker] = 1 result.append(item) return result # A more efficient implementation of Alex's uniquer(), this avoids the # idfun() argument and function-call overhead by assuming that all # items in the sequence are hashable. def uniquer_hashables(seq): seen = {} result = [] for item in seq: #if not item in seen: if item not in seen: seen[item] = 1 result.append(item) return result # Much of the logic here was originally based on recipe 4.9 from the # Python CookBook, but we had to dumb it way down for Python 1.5.2. class LogicalLines(object): def __init__(self, fileobj): self.fileobj = fileobj def readline(self): result = [] while True: line = self.fileobj.readline() if not line: break if line[-2:] == '\\\n': result.append(line[:-2]) else: result.append(line) break return ''.join(result) def readlines(self): result = [] while True: line = self.readline() if not line: break result.append(line) return result class UniqueList(UserList): def __init__(self, seq = []): UserList.__init__(self, seq) self.unique = True def __make_unique(self): if not self.unique: self.data = uniquer_hashables(self.data) self.unique = True def __lt__(self, other): self.__make_unique() return UserList.__lt__(self, other) def __le__(self, other): self.__make_unique() return UserList.__le__(self, other) def __eq__(self, other): self.__make_unique() return UserList.__eq__(self, other) def __ne__(self, other): self.__make_unique() return UserList.__ne__(self, other) def __gt__(self, other): self.__make_unique() return UserList.__gt__(self, other) def __ge__(self, other): self.__make_unique() return UserList.__ge__(self, other) def __cmp__(self, other): self.__make_unique() return UserList.__cmp__(self, other) def __len__(self): self.__make_unique() return UserList.__len__(self) def __getitem__(self, i): self.__make_unique() return UserList.__getitem__(self, i) def __setitem__(self, i, item): UserList.__setitem__(self, i, item) self.unique = False def __getslice__(self, i, j): self.__make_unique() return UserList.__getslice__(self, i, j) def __setslice__(self, i, j, other): UserList.__setslice__(self, i, j, other) self.unique = False def __add__(self, other): result = UserList.__add__(self, other) result.unique = False return result def __radd__(self, other): result = UserList.__radd__(self, other) result.unique = False return result def __iadd__(self, other): result = UserList.__iadd__(self, other) result.unique = False return result def __mul__(self, other): result = UserList.__mul__(self, other) result.unique = False return result def __rmul__(self, other): result = UserList.__rmul__(self, other) result.unique = False return result def __imul__(self, other): result = UserList.__imul__(self, other) result.unique = False return result def append(self, item): UserList.append(self, item) self.unique = False def insert(self, i): UserList.insert(self, i) self.unique = False def count(self, item): self.__make_unique() return UserList.count(self, item) def index(self, item): self.__make_unique() return UserList.index(self, item) def reverse(self): self.__make_unique() UserList.reverse(self) def sort(self, *args, **kwds): self.__make_unique() return UserList.sort(self, *args, **kwds) def extend(self, other): UserList.extend(self, other) self.unique = False class Unbuffered(object): """ A proxy class 
that wraps a file object, flushing after every write, and delegating everything else to the wrapped object. """ def __init__(self, file): self.file = file self.softspace = 0 ## backward compatibility; not supported in Py3k def write(self, arg): try: self.file.write(arg) self.file.flush() except IOError: # Stdout might be connected to a pipe that has been closed # by now. The most likely reason for the pipe being closed # is that the user has press ctrl-c. It this is the case, # then SCons is currently shutdown. We therefore ignore # IOError's here so that SCons can continue and shutdown # properly so that the .sconsign is correctly written # before SCons exits. pass def __getattr__(self, attr): return getattr(self.file, attr) def make_path_relative(path): """ makes an absolute path name to a relative pathname. """ if os.path.isabs(path): drive_s,path = os.path.splitdrive(path) import re if not drive_s: path=re.compile("/*(.*)").findall(path)[0] else: path=path[1:] assert( not os.path.isabs( path ) ), path return path # The original idea for AddMethod() and RenameFunction() come from the # following post to the ActiveState Python Cookbook: # # ASPN: Python Cookbook : Install bound methods in an instance # http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/223613 # # That code was a little fragile, though, so the following changes # have been wrung on it: # # * Switched the installmethod() "object" and "function" arguments, # so the order reflects that the left-hand side is the thing being # "assigned to" and the right-hand side is the value being assigned. # # * Changed explicit type-checking to the "try: klass = object.__class__" # block in installmethod() below so that it still works with the # old-style classes that SCons uses. # # * Replaced the by-hand creation of methods and functions with use of # the "new" module, as alluded to in Alex Martelli's response to the # following Cookbook post: # # ASPN: Python Cookbook : Dynamically added methods to a class # http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/81732 def AddMethod(obj, function, name=None): """ Adds either a bound method to an instance or an unbound method to a class. If name is ommited the name of the specified function is used by default. Example: a = A() def f(self, x, y): self.z = x + y AddMethod(f, A, "add") a.add(2, 4) print a.z AddMethod(lambda self, i: self.l[i], a, "listIndex") print a.listIndex(5) """ if name is None: name = function.func_name else: function = RenameFunction(function, name) if hasattr(obj, '__class__') and obj.__class__ is not type: # "obj" is an instance, so it gets a bound method. setattr(obj, name, MethodType(function, obj, obj.__class__)) else: # "obj" is a class, so it gets an unbound method. setattr(obj, name, MethodType(function, None, obj)) def RenameFunction(function, name): """ Returns a function identical to the specified function, but with the specified name. 
""" return FunctionType(function.func_code, function.func_globals, name, function.func_defaults) md5 = False def MD5signature(s): return str(s) def MD5filesignature(fname, chunksize=65536): f = open(fname, "rb") result = f.read() f.close() return result try: import hashlib except ImportError: pass else: if hasattr(hashlib, 'md5'): md5 = True def MD5signature(s): m = hashlib.md5() m.update(str(s)) return m.hexdigest() def MD5filesignature(fname, chunksize=65536): m = hashlib.md5() f = open(fname, "rb") while True: blck = f.read(chunksize) if not blck: break m.update(str(blck)) f.close() return m.hexdigest() def MD5collect(signatures): """ Collects a list of signatures into an aggregate signature. signatures - a list of signatures returns - the aggregate signature """ if len(signatures) == 1: return signatures[0] else: return MD5signature(', '.join(signatures)) def silent_intern(x): """ Perform sys.intern() on the passed argument and return the result. If the input is ineligible (e.g. a unicode string) the original argument is returned and no exception is thrown. """ try: return sys.intern(x) except TypeError: return x # From Dinu C. Gherman, # Python Cookbook, second edition, recipe 6.17, p. 277. # Also: # http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/68205 # ASPN: Python Cookbook: Null Object Design Pattern #TODO??? class Null(object): class Null(object): """ Null objects always and reliably "do nothing." """ def __new__(cls, *args, **kwargs): if not '_instance' in vars(cls): cls._instance = super(Null, cls).__new__(cls, *args, **kwargs) return cls._instance def __init__(self, *args, **kwargs): pass def __call__(self, *args, **kwargs): return self def __repr__(self): return "Null(0x%08X)" % id(self) def __nonzero__(self): return False def __getattr__(self, name): return self def __setattr__(self, name, value): return self def __delattr__(self, name): return self class NullSeq(Null): def __len__(self): return 0 def __iter__(self): return iter(()) def __getitem__(self, i): return self def __delitem__(self, i): return self def __setitem__(self, i, v): return self del __revision__ # Local Variables: # tab-width:4 # indent-tabs-mode:nil # End: # vim: set expandtab tabstop=4 shiftwidth=4:
mit
4,967,142,932,388,679,000
31.89008
119
0.597877
false
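The Util.py record above builds its "semi-deep copy" around a type-to-copier dispatch table: lists, dicts and tuples get fresh copies while every other object is shared by reference. A minimal, self-contained sketch of that idea (the names below are illustrative, not the SCons API):

# Sketch of the dispatch-table "semi-deep copy": containers are copied,
# everything else is shared by reference.
_copiers = {}

def _copy_list(x):
    return [semi_copy(v) for v in x]

def _copy_dict(x):
    return {k: semi_copy(v) for k, v in x.items()}

_copiers[list] = _copy_list
_copiers[dict] = _copy_dict
_copiers[tuple] = lambda x: tuple(semi_copy(v) for v in x)

def semi_copy(x):
    copier = _copiers.get(type(x))
    return copier(x) if copier else x

if __name__ == "__main__":
    shared = object()
    original = {"nums": [1, 2, 3], "obj": shared}
    clone = semi_copy(original)
    assert clone["nums"] is not original["nums"]  # the list was copied
    assert clone["obj"] is shared                 # other objects stay shared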
sjsucohort6/openstack
python/venv/lib/python2.7/site-packages/novaclient/v2/contrib/host_evacuate.py
5
2318
# Copyright 2013 Rackspace Hosting # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from novaclient import base from novaclient.i18n import _ from novaclient.openstack.common import cliutils from novaclient import utils class EvacuateHostResponse(base.Resource): pass def _server_evacuate(cs, server, args): success = True error_message = "" try: cs.servers.evacuate(server=server['uuid'], host=args.target_host, on_shared_storage=args.on_shared_storage) except Exception as e: success = False error_message = _("Error while evacuating instance: %s") % e return EvacuateHostResponse(base.Manager, {"server_uuid": server['uuid'], "evacuate_accepted": success, "error_message": error_message}) @cliutils.arg('host', metavar='<host>', help='Name of host.') @cliutils.arg( '--target_host', metavar='<target_host>', default=None, help=_('Name of target host. If no host is specified the scheduler will ' 'select a target.')) @cliutils.arg( '--on-shared-storage', dest='on_shared_storage', action="store_true", default=False, help=_('Specifies whether all instances files are on shared storage')) def do_host_evacuate(cs, args): """Evacuate all instances from failed host.""" hypervisors = cs.hypervisors.search(args.host, servers=True) response = [] for hyper in hypervisors: if hasattr(hyper, 'servers'): for server in hyper.servers: response.append(_server_evacuate(cs, server, args)) utils.print_list(response, ["Server UUID", "Evacuate Accepted", "Error Message"])
mit
-4,473,016,173,373,141,000
35.21875
78
0.646247
false
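The host_evacuate extension above attempts each server independently and records the outcome per server instead of aborting on the first failure. A small sketch of that collect-and-continue pattern, using a stand-in callable rather than the real novaclient evacuate API:

def evacuate_all(server_uuids, evacuate):
    # Try every server; remember (uuid, success, error) instead of raising.
    results = []
    for uuid in server_uuids:
        try:
            evacuate(uuid)
            results.append((uuid, True, ""))
        except Exception as exc:
            results.append((uuid, False, str(exc)))
    return results

if __name__ == "__main__":
    def fake_evacuate(uuid):
        if uuid == "bad":
            raise RuntimeError("no valid host found")
    print(evacuate_all(["a", "bad", "c"], fake_evacuate))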
rsig/jenkins-job-builder
jenkins_jobs/parallel.py
11
5198
#!/usr/bin/env python # Copyright (C) 2015 OpenStack, LLC. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # Concurrent execution helper functions and classes from functools import wraps import logging from multiprocessing import cpu_count import threading import traceback try: import Queue as queue except ImportError: import queue logger = logging.getLogger(__name__) class TaskFunc(dict): """ Simple class to wrap around the information needed to run a function. """ def __init__(self, n_ord, func, args=None, kwargs=None): self['func'] = func self['args'] = args or [] self['kwargs'] = kwargs or {} self['ord'] = n_ord class Worker(threading.Thread): """ Class that actually does the work, gets a TaskFunc through the queue, runs its function with the passed parameters and returns the result If the string 'done' is passed instead of a TaskFunc instance, the thread will end. """ def __init__(self, in_queue, out_queue): threading.Thread.__init__(self) self.in_queue = in_queue self.out_queue = out_queue def run(self): while True: task = self.in_queue.get() if task == 'done': return try: res = task['func'](*task['args'], **task['kwargs']) except Exception as exc: res = exc traceback.print_exc() self.out_queue.put((task['ord'], res)) def concurrent(func): @wraps(func) def concurrentized(*args, **kwargs): """ This function will spawn workers and run the decorated function concurrently on the workers. It will not ensure the thread safety of the decorated function (the decorated function should be thread safe by itself). It accepts two special parameters: :arg list concurrentize: list of the arguments to pass to each of the runs, the results of each run will be returned in the same order. :arg int n_workers: number of workers to use, by default and if '0' passed will autodetect the number of cores and use that, if '1' passed, it will not use any workers and just run as if were not concurrentized everything. Example: > @concurrent > def sample(param1, param2, param3): > return param1 + param2 + param3 > > sample('param1', param2='val2', > concurrent=[ > {'param3': 'val3'}, > {'param3': 'val4'}, > {'param3': 'val5'}, > ]) > ['param1val2val3', 'param1val2val4', 'param1val2val5'] This will run the function `concurrentized_function` 3 times, in concurrent (depending on the number of detected cores) and return an array with the results of the executions in the same order the parameters were passed. 
""" n_workers = kwargs.pop('n_workers', 0) p_kwargs = kwargs.pop('concurrent', []) # if only one parameter is passed inside the concurrent dict, run the # original function as is, no need for pools if len(p_kwargs) == 1: kwargs.update(p_kwargs[0]) if len(p_kwargs) in (1, 0): return func(*args, **kwargs) # prepare the workers # If no number of workers passed or passed 0 if not n_workers: n_workers = cpu_count() logging.debug("Running concurrent %d workers", n_workers) worker_pool = [] in_queue = queue.Queue() out_queue = queue.Queue() for n_worker in range(n_workers): new_worker = Worker(in_queue, out_queue) new_worker.setDaemon(True) logging.debug("Spawning worker %d", n_worker) new_worker.start() worker_pool.append(new_worker) # Feed the workers n_ord = 0 for f_kwargs in p_kwargs: f_kwargs.update(kwargs) in_queue.put(TaskFunc(n_ord, func, args, f_kwargs)) n_ord += 1 for _ in range(n_workers): in_queue.put('done') # Wait for the results logging.debug("Waiting for workers to finish processing") results = [] for _ in p_kwargs: new_res = out_queue.get() results.append(new_res) # cleanup for worker in worker_pool: worker.join() # Reorder the results results = [r[1] for r in sorted(results)] logging.debug("Concurrent task finished") return results return concurrentized
apache-2.0
-385,167,474,104,383,360
33.423841
79
0.596768
false
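The concurrent() decorator in parallel.py above pushes ordinal-tagged tasks onto a queue, lets worker threads drain it, and re-sorts the collected results so the output order matches the input order. A stripped-down sketch of that mechanism, assuming plain positional arguments instead of the TaskFunc wrapper:

import queue
import threading

def run_ordered(func, arg_list, n_workers=2):
    in_q, out_q = queue.Queue(), queue.Queue()

    def worker():
        while True:
            item = in_q.get()
            if item == 'done':      # sentinel: shut this worker down
                return
            idx, args = item
            try:
                out_q.put((idx, func(*args)))
            except Exception as exc:
                out_q.put((idx, exc))

    threads = [threading.Thread(target=worker, daemon=True)
               for _ in range(n_workers)]
    for t in threads:
        t.start()
    for idx, args in enumerate(arg_list):
        in_q.put((idx, args))
    for _ in threads:
        in_q.put('done')
    results = [out_q.get() for _ in arg_list]
    for t in threads:
        t.join()
    return [value for _, value in sorted(results)]

if __name__ == "__main__":
    print(run_ordered(lambda a, b: a + b, [(1, 2), (3, 4), (5, 6)]))  # [3, 7, 11]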
hammerlab/isovar
test/test_value_object.py
1
1113
from __future__ import print_function, division, absolute_import from isovar.value_object import ValueObject from nose.tools import eq_ def test_no_fields_unless_specified(): v = ValueObject() eq_(v._fields, ()) eq_(v._values, ()) def test_default_string_repr(): v = ValueObject() eq_(str(v), "ValueObject()") eq_(repr(v), "ValueObject()") class DerivedWithoutInit(ValueObject): __slots__ = ["a", "b"] def test_default_init(): obj = DerivedWithoutInit(a=1, b=2) eq_(obj.a, 1) eq_(obj.b, 2) class DerivedWithInit(ValueObject): __slots__ = ["a", "b"] def __init__(self, a, b): self.a = a self.b = b def test_equality_checks_class(): # two objects of different classes should not be equal # even if their fields are the same x = DerivedWithInit(a=1, b=2) y = DerivedWithoutInit(a=1, b=2) eq_(hash(x), hash(y)) assert x != y, "Expected %s != %s" % (x, y) def test_derived_string_repr(): x = DerivedWithInit(a=1, b=2) eq_(str(x), "DerivedWithInit(a=1, b=2)") eq_(repr(x), "DerivedWithInit(a=1, b=2)")
apache-2.0
-5,438,773,729,833,326,000
21.26
64
0.601078
false
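The tests above exercise a value object whose fields are declared through __slots__ and whose equality, hash and repr derive from those fields. A hedged, self-contained sketch of how such a class can be built; this is an illustration, not the actual isovar.value_object implementation:

class ValueObject(object):
    __slots__ = []

    def __init__(self, **kwargs):
        for name in self.__slots__:
            setattr(self, name, kwargs[name])

    @property
    def _values(self):
        return tuple(getattr(self, name) for name in self.__slots__)

    def __repr__(self):
        pairs = ", ".join("%s=%r" % (n, v)
                          for n, v in zip(self.__slots__, self._values))
        return "%s(%s)" % (type(self).__name__, pairs)

    def __eq__(self, other):
        # Different classes never compare equal, even with equal fields.
        return type(self) is type(other) and self._values == other._values

    def __hash__(self):
        return hash(self._values)

class Point(ValueObject):
    __slots__ = ["x", "y"]

if __name__ == "__main__":
    p = Point(x=1, y=2)
    assert repr(p) == "Point(x=1, y=2)"
    assert p == Point(x=1, y=2)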
lpotter/sensorfw
tests/contextfw/orientation/testorientation.py
2
6293
#!/usr/bin/env python ## ## Copyright (C) 2009-2010 Nokia Corporation ## ## Contact: Jean-Luc Lamadon <[email protected]> ## Matias Muhonen <[email protected]> ## Tapio Rantala <[email protected]> ## Lihan Guo <[email protected]> ## ## This file is part of Sensord. ## ## Sensord is free software; you can redistribute it and/or modify ## it under the terms of the GNU Lesser General Public License ## version 2.1 as published by the Free Software Foundation. ## ## Sensord is distributed in the hope that it will be useful, ## but WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ## Lesser General Public License for more details. ## ## You should have received a copy of the GNU Lesser General Public ## License along with Sensord. If not, see <http://www.gnu.org/licenses/>. ## ## import sys import unittest import os import commands import string from string import Template import time import signal import math from ContextKit.cltool import CLTool def timeoutHandler(signum, frame): raise Exception('Tests have been running for too long') class Orientation(unittest.TestCase): def setUp(self): self.fpath = "/tmp/fakedsensors/accelerometer" self.datafaker = "/usr/bin/datafaker" self.context_client_edge = CLTool("context-listen", "Screen.TopEdge", "Position.IsFlat") self.context_client_cover = CLTool("context-listen", "Screen.IsCovered") # Get angle thresholds from config #Todo: Here should find the configuration file according to device type landscape_angle = int(os.popen("cat `ls /etc/sensorfw/sensord.conf.d/* /etc/sensorfw/sensord-ncdk.conf` | grep threshold_landscape | head -n1 | cut -f2 -d=", "r").read()) portrait_angle = int(os.popen("cat `ls /etc/sensorfw/sensord.conf.d/* /etc/sensorfw/sensord-ncdk.conf` | grep threshold_portrait | head -n1 | cut -f2 -d=", "r").read()) print("Using thresholds for orientation changes:\n Landscape: " + str(landscape_angle) + "\n Portrait: " + str(portrait_angle) + "\n") # Create data sets self.dataSet = [] self.expectSet = [] dataSet_top = [] dataSet_left = [] # TopEdge = top (U, U, T, U, T), starting from Unknown for angle in [0, portrait_angle-1, portrait_angle+1, portrait_angle-1, 90]: dataSet_top.append([0, int(1000*math.cos(math.radians(90-angle))), int(1000*math.cos(math.radians(angle)))]) self.dataSet += dataSet_top self.expectSet.append('Position.IsFlat = bool:true') self.expectSet.append('') self.expectSet.append('Screen.TopEdge = QString:"top"') self.expectSet.append('Position.IsFlat = bool:true') self.expectSet.append('Position.IsFlat = bool:false') # TopEdge = left (U, U, L, U, L) for angle in [0, landscape_angle-1, landscape_angle+1, landscape_angle-1, 90]: dataSet_left.append([-int(1000*math.cos(math.radians(90-angle))), 0, int(1000*math.cos(math.radians(angle)))]) self.dataSet += dataSet_left self.expectSet.append('Position.IsFlat = bool:true') self.expectSet.append('') self.expectSet.append('Screen.TopEdge = QString:"left"') self.expectSet.append('Position.IsFlat = bool:true') self.expectSet.append('Position.IsFlat = bool:false') # TopEdge = bottom, (U, U, B, U, B) for v in dataSet_top[:]: u = v[:] u[1] = -u[1] self.dataSet.append(u) self.expectSet.append('Position.IsFlat = bool:true') self.expectSet.append('') self.expectSet.append('Screen.TopEdge = QString:"bottom"') self.expectSet.append('Position.IsFlat = bool:true') self.expectSet.append('Position.IsFlat = bool:false') # TopEdge = right (U, U, R, U, R) for v in dataSet_left[:]: u = v[:] u[0] = -u[0] 
self.dataSet.append(u) self.expectSet.append('Position.IsFlat = bool:true') self.expectSet.append('') self.expectSet.append('Screen.TopEdge = QString:"right"') self.expectSet.append('Position.IsFlat = bool:true') self.expectSet.append('Position.IsFlat = bool:false') # TopEdge: left -> top -> left (should represent bottom and right well enough) for angle in [90, portrait_angle-1, portrait_angle+1, 90-portrait_angle, 0]: self.dataSet.append([-int(1000*math.cos(math.radians(angle))), int(1000*math.cos(math.radians(90-angle))), 0]) self.expectSet.append('Screen.TopEdge = QString:"top"') self.expectSet.append('Screen.TopEdge = QString:"left"') self.expectSet.append('') self.expectSet.append('Screen.TopEdge = QString:"top"') self.expectSet.append('Screen.TopEdge = QString:"left"') def tearDown(self): self.context_client_edge.atexit() self.context_client_cover.atexit() def testOrientation(self): # Set the starting position to bottom (0, -1000, 0) os.system("echo 0 -1000 0 | " + self.datafaker + " " + self.fpath) index = 0 for v in self.dataSet[:]: time.sleep(0.9) if self.expectSet[index] != '': os.system("echo " + str(v[0]) + " " + str(v[1]) + " " + str(v[2]) + " | " + self.datafaker + " " + self.fpath) self.assert_(self.context_client_edge.expect(self.expectSet[index])) index += 1 # Set the starting position os.system("echo 0 0 -1000 | " + self.datafaker + " " + self.fpath) time.sleep(0.9) # On the table os.system("echo -36 -90 953 | " + self.datafaker + " " + self.fpath) self.assert_(self.context_client_cover.expect('Screen.IsCovered = bool:false')) time.sleep(0.9) # On the table upside down os.system("echo 270 216 -972 | " + self.datafaker + " " + self.fpath) self.assert_(self.context_client_cover.expect('Screen.IsCovered = bool:true')) if __name__ == "__main__": sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 1) signal.signal(signal.SIGALRM, timeoutHandler) signal.alarm(30) unittest.main()
lgpl-2.1
2,377,500,349,425,687,000
39.6
178
0.627999
false
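The orientation test above fakes accelerometer samples by projecting a tilt angle onto two axes as 1000*cos() components, roughly 1 g split between "up" and "sideways". A small sketch of that mapping; the axis naming here is only for illustration:

import math

def fake_reading(angle_deg, edge="top"):
    g = 1000  # the test data expresses roughly 1 g as 1000
    up = int(g * math.cos(math.radians(angle_deg)))
    side = int(g * math.cos(math.radians(90 - angle_deg)))
    if edge == "top":            # tilting the top edge up: y/z plane
        return (0, side, up)
    return (-side, 0, up)        # tilting the left edge up: x/z plane

if __name__ == "__main__":
    print(fake_reading(0))       # flat on the table: (0, 0, 1000)
    print(fake_reading(90))      # fully upright:     (0, 1000, 0)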
V155/qutebrowser
scripts/asciidoc2html.py
5
10825
#!/usr/bin/env python3 # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et: # Copyright 2014-2018 Florian Bruhin (The Compiler) <[email protected]> # This file is part of qutebrowser. # # qutebrowser is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # qutebrowser is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with qutebrowser. If not, see <http://www.gnu.org/licenses/>. """Generate the html documentation based on the asciidoc files.""" import re import os import os.path import sys import subprocess import glob import shutil import tempfile import argparse import io sys.path.insert(0, os.path.join(os.path.dirname(__file__), os.pardir)) from scripts import utils class AsciiDoc: """Abstraction of an asciidoc subprocess.""" FILES = ['faq', 'changelog', 'contributing', 'quickstart', 'userscripts'] def __init__(self, args): self._cmd = None self._args = args self._homedir = None self._themedir = None self._tempdir = None self._failed = False def prepare(self): """Get the asciidoc command and create the homedir to use.""" self._cmd = self._get_asciidoc_cmd() self._homedir = tempfile.mkdtemp() self._themedir = os.path.join( self._homedir, '.asciidoc', 'themes', 'qute') self._tempdir = os.path.join(self._homedir, 'tmp') os.makedirs(self._tempdir) os.makedirs(self._themedir) def cleanup(self): """Clean up the temporary home directory for asciidoc.""" if self._homedir is not None and not self._failed: shutil.rmtree(self._homedir) def build(self): """Build either the website or the docs.""" if self._args.website: self._build_website() else: self._build_docs() self._copy_images() def _build_docs(self): """Render .asciidoc files to .html sites.""" files = [('doc/{}.asciidoc'.format(f), 'qutebrowser/html/doc/{}.html'.format(f)) for f in self.FILES] for src in glob.glob('doc/help/*.asciidoc'): name, _ext = os.path.splitext(os.path.basename(src)) dst = 'qutebrowser/html/doc/{}.html'.format(name) files.append((src, dst)) # patch image links to use local copy replacements = [ ("https://raw.githubusercontent.com/qutebrowser/qutebrowser/master/doc/img/cheatsheet-big.png", "qute://help/img/cheatsheet-big.png"), ("https://raw.githubusercontent.com/qutebrowser/qutebrowser/master/doc/img/cheatsheet-small.png", "qute://help/img/cheatsheet-small.png") ] asciidoc_args = ['-a', 'source-highlighter=pygments'] for src, dst in files: src_basename = os.path.basename(src) modified_src = os.path.join(self._tempdir, src_basename) with open(modified_src, 'w', encoding='utf-8') as modified_f, \ open(src, 'r', encoding='utf-8') as f: for line in f: for orig, repl in replacements: line = line.replace(orig, repl) modified_f.write(line) self.call(modified_src, dst, *asciidoc_args) def _copy_images(self): """Copy image files to qutebrowser/html/doc.""" print("Copying files...") dst_path = os.path.join('qutebrowser', 'html', 'doc', 'img') try: os.mkdir(dst_path) except FileExistsError: pass for filename in ['cheatsheet-big.png', 'cheatsheet-small.png']: src = os.path.join('doc', 'img', filename) dst = os.path.join(dst_path, filename) shutil.copy(src, dst) def _build_website_file(self, root, filename): 
"""Build a single website file.""" src = os.path.join(root, filename) src_basename = os.path.basename(src) parts = [self._args.website[0]] dirname = os.path.dirname(src) if dirname: parts.append(os.path.relpath(os.path.dirname(src))) parts.append( os.extsep.join((os.path.splitext(src_basename)[0], 'html'))) dst = os.path.join(*parts) os.makedirs(os.path.dirname(dst), exist_ok=True) modified_src = os.path.join(self._tempdir, src_basename) shutil.copy('www/header.asciidoc', modified_src) outfp = io.StringIO() with open(modified_src, 'r', encoding='utf-8') as header_file: header = header_file.read() header += "\n\n" with open(src, 'r', encoding='utf-8') as infp: outfp.write("\n\n") hidden = False found_title = False title = "" last_line = "" for line in infp: line = line.rstrip() if line == '// QUTE_WEB_HIDE': assert not hidden hidden = True elif line == '// QUTE_WEB_HIDE_END': assert hidden hidden = False elif line == "The Compiler <[email protected]>": continue elif re.fullmatch(r':\w+:.*', line): # asciidoc field continue if not found_title: if re.fullmatch(r'=+', line): line = line.replace('=', '-') found_title = True title = last_line + " | qutebrowser\n" title += "=" * (len(title) - 1) elif re.fullmatch(r'= .+', line): line = '==' + line[1:] found_title = True title = last_line + " | qutebrowser\n" title += "=" * (len(title) - 1) if not hidden: outfp.write(line.replace(".asciidoc[", ".html[") + '\n') last_line = line current_lines = outfp.getvalue() outfp.close() with open(modified_src, 'w+', encoding='utf-8') as final_version: final_version.write(title + "\n\n" + header + current_lines) asciidoc_args = ['--theme=qute', '-a toc', '-a toc-placement=manual', '-a', 'source-highlighter=pygments'] self.call(modified_src, dst, *asciidoc_args) def _build_website(self): """Prepare and build the website.""" theme_file = os.path.abspath(os.path.join('www', 'qute.css')) shutil.copy(theme_file, self._themedir) outdir = self._args.website[0] for root, _dirs, files in os.walk(os.getcwd()): for filename in files: basename, ext = os.path.splitext(filename) if (ext != '.asciidoc' or basename in ['header', 'OpenSans-License']): continue self._build_website_file(root, filename) copy = {'icons': 'icons', 'doc/img': 'doc/img', 'www/media': 'media/'} for src, dest in copy.items(): full_dest = os.path.join(outdir, dest) try: shutil.rmtree(full_dest) except FileNotFoundError: pass shutil.copytree(src, full_dest) for dst, link_name in [ ('README.html', 'index.html'), (os.path.join('doc', 'quickstart.html'), 'quickstart.html')]: try: os.symlink(dst, os.path.join(outdir, link_name)) except FileExistsError: pass def _get_asciidoc_cmd(self): """Try to find out what commandline to use to invoke asciidoc.""" if self._args.asciidoc is not None: return self._args.asciidoc try: subprocess.run(['asciidoc'], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) except OSError: pass else: return ['asciidoc'] try: subprocess.run(['asciidoc.py'], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) except OSError: pass else: return ['asciidoc.py'] raise FileNotFoundError def call(self, src, dst, *args): """Call asciidoc for the given files. Args: src: The source .asciidoc file. dst: The destination .html file, or None to auto-guess. *args: Additional arguments passed to asciidoc. 
""" print("Calling asciidoc for {}...".format(os.path.basename(src))) cmdline = self._cmd[:] if dst is not None: cmdline += ['--out-file', dst] cmdline += args cmdline.append(src) try: env = os.environ.copy() env['HOME'] = self._homedir subprocess.run(cmdline, check=True, env=env) except (subprocess.CalledProcessError, OSError) as e: self._failed = True utils.print_col(str(e), 'red') print("Keeping modified sources in {}.".format(self._homedir)) sys.exit(1) def main(colors=False): """Generate html files for the online documentation.""" utils.change_cwd() utils.use_color = colors parser = argparse.ArgumentParser() parser.add_argument('--website', help="Build website into a given " "directory.", nargs=1) parser.add_argument('--asciidoc', help="Full path to python and " "asciidoc.py. If not given, it's searched in PATH.", nargs=2, required=False, metavar=('PYTHON', 'ASCIIDOC')) args = parser.parse_args() try: os.mkdir('qutebrowser/html/doc') except FileExistsError: pass asciidoc = AsciiDoc(args) try: asciidoc.prepare() except FileNotFoundError: utils.print_col("Could not find asciidoc! Please install it, or use " "the --asciidoc argument to point this script to the " "correct python/asciidoc.py location!", 'red') sys.exit(1) try: asciidoc.build() finally: asciidoc.cleanup() if __name__ == '__main__': main(colors=True)
gpl-3.0
-8,659,248,438,781,764,000
34.726073
109
0.543834
false
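asciidoc2html.py above locates a usable asciidoc by simply trying each candidate command and keeping the first one the operating system can launch. A sketch of that probing pattern; the candidate names below are only examples:

import subprocess

def find_command(candidates):
    for cmd in candidates:
        try:
            subprocess.run(cmd, stdout=subprocess.DEVNULL,
                           stderr=subprocess.DEVNULL)
        except OSError:          # not installed / not launchable
            continue
        return cmd
    raise FileNotFoundError("none of the candidate commands could be run")

if __name__ == "__main__":
    try:
        print(find_command([["asciidoc", "--version"],
                            ["asciidoc.py", "--version"]]))
    except FileNotFoundError as err:
        print(err)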
codester2/devide
modules/insight/module_index.py
7
12137
# Copyright (c) Charl P. Botha, TU Delft # All rights reserved. # See COPYRIGHT for details. class ITKtoVTK: kits = ['itk_kit'] cats = ['Insight'] help = """Use this module to convert from any ITK image type to the corresponding VTK type. """ class VTKtoITK: kits = ['itk_kit'] cats = ['Insight'] help = """Convert from a VTK image to an ITK image. By default (AutoType active), the output ITK image has the same pixel type as the input VTK image. However, if AutoType has been unchecked in the configuration, the output ITK image has 'Data type' as its type. """ class cannyEdgeDetection: kits = ['itk_kit'] cats = ['Insight'] help = """Performs 3D Canny edge detection on input image. A rule of thumb for the thersholds: lower threshold == 0.5 * upper threshold. NOTE: Due to a bug in ITK [1], the Canny filter gives invalid results when run more than once with different sets of parameters. To work around this, DeVIDE re-instantiates the canny filter at every execution. This means that only parameters that you see in the GUI are transferred to the new instance. [1] http://www.itk.org/pipermail/insight-users/2009-August/032018.html """ class confidenceSeedConnect: kits = ['itk_kit'] cats = ['Insight'] keywords = ['region growing', 'confidence', 'seed'] help = """Confidence-based 3D region growing. This module will perform a 3D region growing starting from the user-supplied points. The mean and standard deviation are calculated in a small initial region around the seed points. New contiguous points have to have intensities on the range [mean - f*stdDev, mean + f*stdDev] to be included. f is user-definable. After this initial growing iteration, if the user has specified a larger than 0 number of iterations, the mean and standard deviation are recalculated over all the currently selected points and the process is restarted. This process is repeated for the user-defined number of iterations, or until now new pixels are added. Due to weirdness in the underlying ITK filter, deleting all points won't quite work. In other words, the output of this module can only be trusted if there's at least a single seed point. """ class curvatureAnisotropicDiffusion: kits = ['itk_kit'] cats = ['Insight'] class curvatureFlowDenoising: kits = ['itk_kit'] cats = ['Insight', 'Level Sets'] help = """Curvature-driven image denoising. This uses curvature-based level set techniques to smooth homogeneous regions whilst retaining boundary information. """ class DanielssonDistance: kits = ['itk_kit'] cats = ['Insight'] help = """Calculates distance image of input image. The input image can either contain marked objects or binary objects. """ class demonsRegistration: kits = ['itk_kit'] cats = ['Insight', 'Registration', 'Optic Flow'] help = """Performs demons registration on fixed and moving input images, returns deformation field. The intensity difference threshold is absolute, so check the values in your datasets and adjust it accordingly. For example, if you find that two regions should match but you see intensity differences of 50 (e.g. in a CT dataset), the threshold should be approximately 60. NOTE: remember to update help w.r.t. inverse direction of vectors in deformation field. Also read this thread: http://public.kitware.com/pipermail/insight-users/2004-November/011002.html """ class discreteLaplacian: kits = ['itk_kit'] cats = ['Insight'] help = """Calculates Laplacian of input image. This makes use of a discrete implementation. Due to this, the input image should probably be pre-smoothed with e.g. 
a Gaussian as the Laplacian is very sensitive to noise. Note: One could also calculate the Laplacian by convolving with the second derivative of a Gaussian. Laplacian == secondPartialDerivative(f,x0) + ... + secondPartialDerivative(f,xn) """ # had to disable this one due to stupid itkLevelSetNode non-wrapping # in ITK-2-4-1 class fastMarching: kits = ['itk_kit'] cats = ['Insight', 'Level Sets'] help = """Given a set of seed points and a speed image, this module will propagate a moving front out from those points using the fast marching level set formulation. """ class gaussianConvolve: kits = ['itk_kit'] cats = ['Insight'] help = """Convolves input with Gaussian, or its first or second derivative. Only a single dimension is convolved (i.e. the filter is separated). Select which dimension in the View/Config window. The convolution is implemented as an IIR filter. $Revision: 1.4 $ """ class geodesicActiveContour: kits = ['itk_kit'] cats = ['Insight', 'Level Sets'] keywords = ['level set'] help = """Module for performing Geodesic Active Contour-based segmentation on 3D data. The input feature image is an edge potential map with values close to 0 in regions close to the edges and values close to 1 otherwise. The level set speed function is based on this. For example: smooth an input image, determine the gradient magnitude and then pass it through a sigmoid transformation to create an edge potential map. The initial level set is a volume with the initial surface embedded as the 0 level set, i.e. the 0-value iso-contour (more or less). Also see figure 9.18 in the ITK Software Guide. """ class gradientAnisotropicDiffusion: kits = ['itk_kit'] cats = ['Insight'] help = """Performs a gradient-based anisotropic diffusion. This will smooth homogeneous areas whilst preserving features (e.g. edges). """ class gradientMagnitudeGaussian: kits = ['itk_kit'] cats = ['Insight'] help = """Calculates gradient magnitude of an image by convolving with the derivative of a Gaussian. The ITK class that this is based on uses a recursive gaussian filter implementation. """ # isn't wrapped anymore, no idea why. #class gvfgac: # kits = ['itk_kit'] # cats = ['Insight'] # will fix when I rework the registration modules #class imageStackRDR: # kits = ['itk_kit'] # cats = ['Insight'] class isolatedConnect: kits = ['itk_kit'] cats = ['Insight'] keywords = ['segment'] help = """Voxels connected to the first group of seeds and NOT connected to the second group of seeds are segmented by optimising an upper or lower threshold. For example, to separate two non-touching light objects, you would do the following: <ul> <li>Select point(s) in the first object with slice3dVWR 1</li> <li>Select point(s) in the second object with slice3dVWR 2</li> <li>Connect up the three inputs of isolatedConnect as follows: input image, point(s) of object 1, point(s) of object 2</li> <li>isolatedConnect will now calculate a threshold so that when this threshold is applied to the image and a region growing is performed using the first set of points, only object 1 will be separated.</li> </il> </ul> """ class ITKReader: kits = ['itk_kit'] cats = ['Insight', 'Readers'] help = """Reads all the 3D formats supported by ITK. In its default configuration, this module will derive file type, data type and dimensionality from the file itself. You can manually set the data type and dimensionality, in which case ITK will attempt to cast the data. Keep in mind that DeVIDE mostly uses the float versions of ITK components. 
At least the following file formats are available (a choice is made based on the filename extension that you choose):<br> <ul> <li>.mha: MetaImage all-in-one file</li> <li>.mhd: MetaImage .mhd header file and .raw data file</li> <li>.hdr or .img: Analyze .hdr header and .img data</li> </ul> """ class ITKWriter: kits = ['itk_kit'] cats = ['Insight', 'Writers'] help = """Writes any of the image formats supported by ITK. At least the following file formats are available (a choice is made based on the filename extension that you choose):<br> <ul> <li>.mha: MetaImage all-in-one file</li> <li>.mhd: MetaImage .mhd header file and .raw data file</li> <li>.hdr or .img: Analyze .hdr header and .img data</li> </ul> """ # not wrapped by ITK-2-4-1 default wrappings class levelSetMotionRegistration: kits = ['itk_kit'] cats = ['Insight', 'Registration', 'Level Sets'] keywords = ['level set', 'registration', 'deformable', 'non-rigid'] help = """Performs deformable registration between two input volumes using level set motion. """ # not wrapped by WrapITK 20060710 # class nbCurvesLevelSet: # kits = ['itk_kit'] # cats = ['Insight', 'Level Set'] # keywords = ['level set'] # help = """Narrow band level set implementation. # The input feature image is an edge potential map with values close to 0 in # regions close to the edges and values close to 1 otherwise. The level set # speed function is based on this. For example: smooth an input image, # determine the gradient magnitude and then pass it through a sigmoid # transformation to create an edge potential map. # The initial level set is a volume with the initial surface embedded as the # 0 level set, i.e. the 0-value iso-contour (more or less). # """ class nbhSeedConnect: kits = ['itk_kit'] cats = ['Insight'] help = """Neighbourhood-based 3D region growing. This module will perform a 3D region growing starting from the user-supplied points. Only pixels with intensities between the user-configurable thresholds and with complete neighbourhoods where all pixels have intensities between the thresholds are considered valid candidates. The size of the neighbourhood can be set as well. """ # reactivate when I rework the registration modules #class register2D: # kits = ['itk_kit'] # cats = ['Insight'] class sigmoid: kits = ['itk_kit'] cats = ['Insight'] help = """Perform sigmoid transformation on all input voxels. f(x) = (max - min) frac{1}{1 + exp(- frac{x - beta}{alpha})} + min """ class symmetricDemonsRegistration: kits = ['itk_kit'] cats = ['Insight', 'Registration', 'Optic Flow'] help = """Performs symmetric forces demons registration on fixed and moving input images, returns deformation field. """ class tpgac: kits = ['itk_kit'] cats = ['Insight', 'Level Sets'] keywords = ['segment', 'level set'] help = """Module for performing topology-preserving Geodesic Active Contour-based segmentation on 3D data. This module requires a DeVIDE-specific ITK class. The input feature image is an edge potential map with values close to 0 in regions close to the edges and values close to 1 otherwise. The level set speed function is based on this. For example: smooth an input image, determine the gradient magnitude and then pass it through a sigmoid transformation to create an edge potential map. The initial level set is a volume with the initial surface embedded as the 0 level set, i.e. the 0-value iso-contour (more or less). Also see figure 9.18 in the ITK Software Guide. 
""" # will work on this when I rework the 2D registration #class transform2D: # kits = ['itk_kit'] # cats = ['Insight'] #class transformStackRDR: # kits = ['itk_kit'] # cats = ['Insight'] #class transformStackWRT: # kits = ['itk_kit'] # cats = ['Insight'] class watershed: kits = ['itk_kit'] cats = ['Insight'] help = """Perform watershed segmentation on input. Typically, the input will be the gradient magnitude image. Often, data is smoothed with one of the anisotropic diffusion filters and then the gradient magnitude image is calculated. This serves as input to the watershed module. """
bsd-3-clause
4,509,483,396,588,039,000
32.902235
80
0.685837
false
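The module descriptions in the file above include a sigmoid transform whose help string spells out the mapping f(x) = (max - min) * 1/(1 + exp(-(x - beta)/alpha)) + min, which the level set module descriptions suggest using to build edge potential maps. Below is a small illustrative NumPy sketch of that mapping only; it is not the DeVIDE or ITK implementation, and the parameter names and sample values are assumptions chosen for the demonstration.

# Illustrative sketch of the sigmoid intensity mapping described in the
# module help above; plain NumPy, not DeVIDE or ITK code.
import numpy as np

def sigmoid_transform(image, alpha=10.0, beta=100.0, out_min=0.0, out_max=1.0):
    """f(x) = (out_max - out_min) * 1 / (1 + exp(-(x - beta) / alpha)) + out_min"""
    image = np.asarray(image, dtype=float)
    return (out_max - out_min) / (1.0 + np.exp(-(image - beta) / alpha)) + out_min

if __name__ == "__main__":
    # Intensities well below beta map close to out_min, intensities well above
    # beta map close to out_max, with a smooth transition around beta.
    volume = np.array([0.0, 50.0, 100.0, 150.0, 200.0])
    print(sigmoid_transform(volume))  # roughly [0.0, 0.007, 0.5, 0.993, 1.0]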
neilnee/octopus
scrapy_sample/scrapy_sample/settings.py
1
3196
# -*- coding: utf-8 -*- # Scrapy settings for scrapy_sample project # # For simplicity, this file contains only settings considered important or # commonly used. You can find more settings consulting the documentation: # # http://doc.scrapy.org/en/latest/topics/settings.html # http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html # http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html BOT_NAME = 'scrapy_sample' SPIDER_MODULES = ['scrapy_sample.spiders'] NEWSPIDER_MODULE = 'scrapy_sample.spiders' # Crawl responsibly by identifying yourself (and your website) on the user-agent #USER_AGENT = 'scrapy_sample (+http://www.yourdomain.com)' # Obey robots.txt rules ROBOTSTXT_OBEY = True # Configure maximum concurrent requests performed by Scrapy (default: 16) #CONCURRENT_REQUESTS = 32 # Configure a delay for requests for the same website (default: 0) # See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay # See also autothrottle settings and docs #DOWNLOAD_DELAY = 3 # The download delay setting will honor only one of: #CONCURRENT_REQUESTS_PER_DOMAIN = 16 #CONCURRENT_REQUESTS_PER_IP = 16 # Disable cookies (enabled by default) #COOKIES_ENABLED = False # Disable Telnet Console (enabled by default) #TELNETCONSOLE_ENABLED = False # Override the default request headers: #DEFAULT_REQUEST_HEADERS = { # 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', # 'Accept-Language': 'en', #} # Enable or disable spider middlewares # See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html #SPIDER_MIDDLEWARES = { # 'scrapy_sample.middlewares.ScrapySampleSpiderMiddleware': 543, #} # Enable or disable downloader middlewares # See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html #DOWNLOADER_MIDDLEWARES = { # 'scrapy_sample.middlewares.MyCustomDownloaderMiddleware': 543, #} # Enable or disable extensions # See http://scrapy.readthedocs.org/en/latest/topics/extensions.html #EXTENSIONS = { # 'scrapy.extensions.telnet.TelnetConsole': None, #} # Configure item pipelines # See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html #ITEM_PIPELINES = { # 'scrapy_sample.pipelines.ScrapySamplePipeline': 300, #} # Enable and configure the AutoThrottle extension (disabled by default) # See http://doc.scrapy.org/en/latest/topics/autothrottle.html #AUTOTHROTTLE_ENABLED = True # The initial download delay #AUTOTHROTTLE_START_DELAY = 5 # The maximum download delay to be set in case of high latencies #AUTOTHROTTLE_MAX_DELAY = 60 # The average number of requests Scrapy should be sending in parallel to # each remote server #AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0 # Enable showing throttling stats for every response received: #AUTOTHROTTLE_DEBUG = False # Enable and configure HTTP caching (disabled by default) # See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings #HTTPCACHE_ENABLED = True #HTTPCACHE_EXPIRATION_SECS = 0 #HTTPCACHE_DIR = 'httpcache' #HTTPCACHE_IGNORE_HTTP_CODES = [] #HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
apache-2.0
-3,693,619,621,398,770,000
34.511111
109
0.767522
false
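The settings.py template above leaves most throttling options commented out. As a hedged illustration (not part of the scrapy_sample project), the sketch below shows how a few of those same settings can be overridden per spider through Scrapy's custom_settings attribute; the spider name, start URL, and CSS selectors are placeholder assumptions.

import scrapy

class SampleSpider(scrapy.Spider):
    name = "sample"
    start_urls = ["http://quotes.toscrape.com/"]

    # Per-spider overrides of the project-level settings shown above.
    custom_settings = {
        "DOWNLOAD_DELAY": 3,               # the delay the template leaves commented out
        "AUTOTHROTTLE_ENABLED": True,      # turn on the AutoThrottle extension
        "CONCURRENT_REQUESTS_PER_DOMAIN": 8,
    }

    def parse(self, response):
        # Selector details depend entirely on the page being scraped.
        for quote in response.css("div.quote"):
            yield {"text": quote.css("span.text::text").get()}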
vbannai/neutron
neutron/extensions/firewall.py
3
15602
# Copyright 2013 Big Switch Networks, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # @author: Sumit Naiksatam, [email protected], Big Switch Networks, Inc. import abc from oslo.config import cfg import six from neutron.api import extensions from neutron.api.v2 import attributes as attr from neutron.api.v2 import resource_helper from neutron.common import exceptions as qexception from neutron.openstack.common import log as logging from neutron.plugins.common import constants from neutron.services import service_base LOG = logging.getLogger(__name__) # Firewall Exceptions class FirewallNotFound(qexception.NotFound): message = _("Firewall %(firewall_id)s could not be found.") class FirewallInUse(qexception.InUse): message = _("Firewall %(firewall_id)s is still active.") class FirewallInPendingState(qexception.Conflict): message = _("Operation cannot be performed since associated Firewall " "%(firewall_id)s is in %(pending_state)s.") class FirewallPolicyNotFound(qexception.NotFound): message = _("Firewall Policy %(firewall_policy_id)s could not be found.") class FirewallPolicyInUse(qexception.InUse): message = _("Firewall Policy %(firewall_policy_id)s is being used.") class FirewallRuleNotFound(qexception.NotFound): message = _("Firewall Rule %(firewall_rule_id)s could not be found.") class FirewallRuleInUse(qexception.InUse): message = _("Firewall Rule %(firewall_rule_id)s is being used.") class FirewallRuleNotAssociatedWithPolicy(qexception.InvalidInput): message = _("Firewall Rule %(firewall_rule_id)s is not associated " " with Firewall Policy %(firewall_policy_id)s.") class FirewallRuleInvalidProtocol(qexception.InvalidInput): message = _("Firewall Rule protocol %(protocol)s is not supported. " "Only protocol values %(values)s and their integer " "representation (0 to 255) are supported.") class FirewallRuleInvalidAction(qexception.InvalidInput): message = _("Firewall rule action %(action)s is not supported. " "Only action values %(values)s are supported.") class FirewallInvalidPortValue(qexception.InvalidInput): message = _("Invalid value for port %(port)s.") class FirewallRuleInfoMissing(qexception.InvalidInput): message = _("Missing rule info argument for insert/remove " "rule operation.") class FirewallInternalDriverError(qexception.NeutronException): """Fwaas exception for all driver errors. 
On any failure or exception in the driver, driver should log it and raise this exception to the agent """ message = _("%(driver)s: Internal driver error.") fw_valid_protocol_values = [None, constants.TCP, constants.UDP, constants.ICMP] fw_valid_action_values = [constants.FWAAS_ALLOW, constants.FWAAS_DENY] def convert_protocol(value): if value is None: return if value.isdigit(): val = int(value) if 0 <= val <= 255: return val else: raise FirewallRuleInvalidProtocol(protocol=value, values= fw_valid_protocol_values) elif value.lower() in fw_valid_protocol_values: return value.lower() else: raise FirewallRuleInvalidProtocol(protocol=value, values= fw_valid_protocol_values) def convert_action_to_case_insensitive(value): if value is None: return else: return value.lower() def convert_port_to_string(value): if value is None: return else: return str(value) def _validate_port_range(data, key_specs=None): if data is None: return data = str(data) ports = data.split(':') for p in ports: try: val = int(p) except (ValueError, TypeError): msg = _("Port '%s' is not a valid number") % p LOG.debug(msg) return msg if val <= 0 or val > 65535: msg = _("Invalid port '%s'") % p LOG.debug(msg) return msg def _validate_ip_or_subnet_or_none(data, valid_values=None): if data is None: return None msg_ip = attr._validate_ip_address(data, valid_values) if not msg_ip: return msg_subnet = attr._validate_subnet(data, valid_values) if not msg_subnet: return return _("%(msg_ip)s and %(msg_subnet)s") % {'msg_ip': msg_ip, 'msg_subnet': msg_subnet} attr.validators['type:port_range'] = _validate_port_range attr.validators['type:ip_or_subnet_or_none'] = _validate_ip_or_subnet_or_none RESOURCE_ATTRIBUTE_MAP = { 'firewall_rules': { 'id': {'allow_post': False, 'allow_put': False, 'validate': {'type:uuid': None}, 'is_visible': True, 'primary_key': True}, 'tenant_id': {'allow_post': True, 'allow_put': False, 'required_by_policy': True, 'is_visible': True}, 'name': {'allow_post': True, 'allow_put': True, 'validate': {'type:string': None}, 'is_visible': True, 'default': ''}, 'description': {'allow_post': True, 'allow_put': True, 'validate': {'type:string': None}, 'is_visible': True, 'default': ''}, 'firewall_policy_id': {'allow_post': False, 'allow_put': False, 'validate': {'type:uuid_or_none': None}, 'is_visible': True}, 'shared': {'allow_post': True, 'allow_put': True, 'default': False, 'convert_to': attr.convert_to_boolean, 'is_visible': True, 'required_by_policy': True, 'enforce_policy': True}, 'protocol': {'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': None, 'convert_to': convert_protocol, 'validate': {'type:values': fw_valid_protocol_values}}, 'ip_version': {'allow_post': True, 'allow_put': True, 'default': 4, 'convert_to': attr.convert_to_int, 'validate': {'type:values': [4, 6]}, 'is_visible': True}, 'source_ip_address': {'allow_post': True, 'allow_put': True, 'validate': {'type:ip_or_subnet_or_none': None}, 'is_visible': True, 'default': None}, 'destination_ip_address': {'allow_post': True, 'allow_put': True, 'validate': {'type:ip_or_subnet_or_none': None}, 'is_visible': True, 'default': None}, 'source_port': {'allow_post': True, 'allow_put': True, 'validate': {'type:port_range': None}, 'convert_to': convert_port_to_string, 'default': None, 'is_visible': True}, 'destination_port': {'allow_post': True, 'allow_put': True, 'validate': {'type:port_range': None}, 'convert_to': convert_port_to_string, 'default': None, 'is_visible': True}, 'position': {'allow_post': False, 'allow_put': False, 'default': None, 
'is_visible': True}, 'action': {'allow_post': True, 'allow_put': True, 'convert_to': convert_action_to_case_insensitive, 'validate': {'type:values': fw_valid_action_values}, 'is_visible': True, 'default': 'deny'}, 'enabled': {'allow_post': True, 'allow_put': True, 'default': True, 'convert_to': attr.convert_to_boolean, 'is_visible': True}, }, 'firewall_policies': { 'id': {'allow_post': False, 'allow_put': False, 'validate': {'type:uuid': None}, 'is_visible': True, 'primary_key': True}, 'tenant_id': {'allow_post': True, 'allow_put': False, 'required_by_policy': True, 'is_visible': True}, 'name': {'allow_post': True, 'allow_put': True, 'validate': {'type:string': None}, 'is_visible': True, 'default': ''}, 'description': {'allow_post': True, 'allow_put': True, 'validate': {'type:string': None}, 'is_visible': True, 'default': ''}, 'shared': {'allow_post': True, 'allow_put': True, 'default': False, 'convert_to': attr.convert_to_boolean, 'is_visible': True, 'required_by_policy': True, 'enforce_policy': True}, 'firewall_rules': {'allow_post': True, 'allow_put': True, 'validate': {'type:uuid_list': None}, 'convert_to': attr.convert_none_to_empty_list, 'default': None, 'is_visible': True}, 'audited': {'allow_post': True, 'allow_put': True, 'default': False, 'convert_to': attr.convert_to_boolean, 'is_visible': True}, }, 'firewalls': { 'id': {'allow_post': False, 'allow_put': False, 'validate': {'type:uuid': None}, 'is_visible': True, 'primary_key': True}, 'tenant_id': {'allow_post': True, 'allow_put': False, 'required_by_policy': True, 'is_visible': True}, 'name': {'allow_post': True, 'allow_put': True, 'validate': {'type:string': None}, 'is_visible': True, 'default': ''}, 'description': {'allow_post': True, 'allow_put': True, 'validate': {'type:string': None}, 'is_visible': True, 'default': ''}, 'admin_state_up': {'allow_post': True, 'allow_put': True, 'default': True, 'convert_to': attr.convert_to_boolean, 'is_visible': True}, 'status': {'allow_post': False, 'allow_put': False, 'is_visible': True}, 'shared': {'allow_post': True, 'allow_put': True, 'default': False, 'convert_to': attr.convert_to_boolean, 'is_visible': False, 'required_by_policy': True, 'enforce_policy': True}, 'firewall_policy_id': {'allow_post': True, 'allow_put': True, 'validate': {'type:uuid_or_none': None}, 'is_visible': True}, }, } firewall_quota_opts = [ cfg.IntOpt('quota_firewall', default=1, help=_('Number of firewalls allowed per tenant. ' 'A negative value means unlimited.')), cfg.IntOpt('quota_firewall_policy', default=1, help=_('Number of firewall policies allowed per tenant. ' 'A negative value means unlimited.')), cfg.IntOpt('quota_firewall_rule', default=-1, help=_('Number of firewall rules allowed per tenant. 
' 'A negative value means unlimited.')), ] cfg.CONF.register_opts(firewall_quota_opts, 'QUOTAS') class Firewall(extensions.ExtensionDescriptor): @classmethod def get_name(cls): return "Firewall service" @classmethod def get_alias(cls): return "fwaas" @classmethod def get_description(cls): return "Extension for Firewall service" @classmethod def get_namespace(cls): return "http://wiki.openstack.org/Neutron/FWaaS/API_1.0" @classmethod def get_updated(cls): return "2013-02-25T10:00:00-00:00" @classmethod def get_resources(cls): special_mappings = {'firewall_policies': 'firewall_policy'} plural_mappings = resource_helper.build_plural_mappings( special_mappings, RESOURCE_ATTRIBUTE_MAP) attr.PLURALS.update(plural_mappings) action_map = {'firewall_policy': {'insert_rule': 'PUT', 'remove_rule': 'PUT'}} return resource_helper.build_resource_info(plural_mappings, RESOURCE_ATTRIBUTE_MAP, constants.FIREWALL, action_map=action_map) @classmethod def get_plugin_interface(cls): return FirewallPluginBase def update_attributes_map(self, attributes): super(Firewall, self).update_attributes_map( attributes, extension_attrs_map=RESOURCE_ATTRIBUTE_MAP) def get_extended_resources(self, version): if version == "2.0": return RESOURCE_ATTRIBUTE_MAP else: return {} @six.add_metaclass(abc.ABCMeta) class FirewallPluginBase(service_base.ServicePluginBase): def get_plugin_name(self): return constants.FIREWALL def get_plugin_type(self): return constants.FIREWALL def get_plugin_description(self): return 'Firewall service plugin' @abc.abstractmethod def get_firewalls(self, context, filters=None, fields=None): pass @abc.abstractmethod def get_firewall(self, context, id, fields=None): pass @abc.abstractmethod def create_firewall(self, context, firewall): pass @abc.abstractmethod def update_firewall(self, context, id, firewall): pass @abc.abstractmethod def delete_firewall(self, context, id): pass @abc.abstractmethod def get_firewall_rules(self, context, filters=None, fields=None): pass @abc.abstractmethod def get_firewall_rule(self, context, id, fields=None): pass @abc.abstractmethod def create_firewall_rule(self, context, firewall_rule): pass @abc.abstractmethod def update_firewall_rule(self, context, id, firewall_rule): pass @abc.abstractmethod def delete_firewall_rule(self, context, id): pass @abc.abstractmethod def get_firewall_policy(self, context, id, fields=None): pass @abc.abstractmethod def get_firewall_policies(self, context, filters=None, fields=None): pass @abc.abstractmethod def create_firewall_policy(self, context, firewall_policy): pass @abc.abstractmethod def update_firewall_policy(self, context, id, firewall_policy): pass @abc.abstractmethod def delete_firewall_policy(self, context, id): pass @abc.abstractmethod def insert_rule(self, context, id, rule_info): pass @abc.abstractmethod def remove_rule(self, context, id, rule_info): pass
apache-2.0
-2,531,924,139,244,412,400
35.624413
79
0.561659
false
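The extension above registers custom attribute validators such as the port range check. The standalone sketch below restates that check outside of Neutron's attribute-map machinery, purely as an illustration; the function name and the example values are assumptions.

# Standalone restatement of the "type:port_range" validation idea used above:
# a value like "80" or "1024:2048" is accepted, anything non-numeric or outside
# 1-65535 yields an error message. Not wired into Neutron.
def validate_port_range(value):
    if value is None:
        return None
    for part in str(value).split(':'):
        try:
            port = int(part)
        except (TypeError, ValueError):
            return "Port '%s' is not a valid number" % part
        if port <= 0 or port > 65535:
            return "Invalid port '%s'" % part
    return None

assert validate_port_range("80") is None
assert validate_port_range("1024:2048") is None
assert validate_port_range("0") is not None
assert validate_port_range("80:abc") is not None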
p4datasystems/CarnotKE
jyhton/Lib/test/test_repr.py
12
12407
""" Test cases for the repr module Nick Mathewson """ import sys import os import shutil import unittest from test.test_support import run_unittest, check_py3k_warnings from repr import repr as r # Don't shadow builtin repr from repr import Repr def nestedTuple(nesting): t = () for i in range(nesting): t = (t,) return t class ReprTests(unittest.TestCase): def test_string(self): eq = self.assertEqual eq(r("abc"), "'abc'") eq(r("abcdefghijklmnop"),"'abcdefghijklmnop'") s = "a"*30+"b"*30 expected = repr(s)[:13] + "..." + repr(s)[-14:] eq(r(s), expected) eq(r("\"'"), repr("\"'")) s = "\""*30+"'"*100 expected = repr(s)[:13] + "..." + repr(s)[-14:] eq(r(s), expected) def test_tuple(self): eq = self.assertEqual eq(r((1,)), "(1,)") t3 = (1, 2, 3) eq(r(t3), "(1, 2, 3)") r2 = Repr() r2.maxtuple = 2 expected = repr(t3)[:-2] + "...)" eq(r2.repr(t3), expected) def test_container(self): from array import array from collections import deque eq = self.assertEqual # Tuples give up after 6 elements eq(r(()), "()") eq(r((1,)), "(1,)") eq(r((1, 2, 3)), "(1, 2, 3)") eq(r((1, 2, 3, 4, 5, 6)), "(1, 2, 3, 4, 5, 6)") eq(r((1, 2, 3, 4, 5, 6, 7)), "(1, 2, 3, 4, 5, 6, ...)") # Lists give up after 6 as well eq(r([]), "[]") eq(r([1]), "[1]") eq(r([1, 2, 3]), "[1, 2, 3]") eq(r([1, 2, 3, 4, 5, 6]), "[1, 2, 3, 4, 5, 6]") eq(r([1, 2, 3, 4, 5, 6, 7]), "[1, 2, 3, 4, 5, 6, ...]") # Sets give up after 6 as well eq(r(set([])), "set([])") eq(r(set([1])), "set([1])") eq(r(set([1, 2, 3])), "set([1, 2, 3])") eq(r(set([1, 2, 3, 4, 5, 6])), "set([1, 2, 3, 4, 5, 6])") eq(r(set([1, 2, 3, 4, 5, 6, 7])), "set([1, 2, 3, 4, 5, 6, ...])") # Frozensets give up after 6 as well eq(r(frozenset([])), "frozenset([])") eq(r(frozenset([1])), "frozenset([1])") eq(r(frozenset([1, 2, 3])), "frozenset([1, 2, 3])") eq(r(frozenset([1, 2, 3, 4, 5, 6])), "frozenset([1, 2, 3, 4, 5, 6])") eq(r(frozenset([1, 2, 3, 4, 5, 6, 7])), "frozenset([1, 2, 3, 4, 5, 6, ...])") # collections.deque after 6 eq(r(deque([1, 2, 3, 4, 5, 6, 7])), "deque([1, 2, 3, 4, 5, 6, ...])") # Dictionaries give up after 4. eq(r({}), "{}") d = {'alice': 1, 'bob': 2, 'charles': 3, 'dave': 4} eq(r(d), "{'alice': 1, 'bob': 2, 'charles': 3, 'dave': 4}") d['arthur'] = 1 eq(r(d), "{'alice': 1, 'arthur': 1, 'bob': 2, 'charles': 3, ...}") # array.array after 5. eq(r(array('i')), "array('i', [])") eq(r(array('i', [1])), "array('i', [1])") eq(r(array('i', [1, 2])), "array('i', [1, 2])") eq(r(array('i', [1, 2, 3])), "array('i', [1, 2, 3])") eq(r(array('i', [1, 2, 3, 4])), "array('i', [1, 2, 3, 4])") eq(r(array('i', [1, 2, 3, 4, 5])), "array('i', [1, 2, 3, 4, 5])") eq(r(array('i', [1, 2, 3, 4, 5, 6])), "array('i', [1, 2, 3, 4, 5, ...])") def test_numbers(self): eq = self.assertEqual eq(r(123), repr(123)) eq(r(123L), repr(123L)) eq(r(1.0/3), repr(1.0/3)) n = 10L**100 expected = repr(n)[:18] + "..." + repr(n)[-19:] eq(r(n), expected) def test_instance(self): eq = self.assertEqual i1 = ClassWithRepr("a") eq(r(i1), repr(i1)) i2 = ClassWithRepr("x"*1000) expected = repr(i2)[:13] + "..." 
+ repr(i2)[-14:] eq(r(i2), expected) i3 = ClassWithFailingRepr() eq(r(i3), ("<ClassWithFailingRepr instance at %x>"%id(i3))) s = r(ClassWithFailingRepr) self.assertTrue(s.startswith("<class ")) self.assertTrue(s.endswith(">")) self.assertTrue(s.find("...") == 8) def test_file(self): fp = open(unittest.__file__) self.assertTrue(repr(fp).startswith( "<open file %r, mode 'r' at 0x" % unittest.__file__)) fp.close() self.assertTrue(repr(fp).startswith( "<closed file %r, mode 'r' at 0x" % unittest.__file__)) def test_lambda(self): self.assertTrue(repr(lambda x: x).startswith( "<function <lambda")) # XXX anonymous functions? see func_repr def test_builtin_function(self): eq = self.assertEqual # Functions eq(repr(hash), '<built-in function hash>') # Methods self.assertTrue(repr(''.split).startswith( '<built-in method split of str object at 0x')) def test_xrange(self): eq = self.assertEqual eq(repr(xrange(1)), 'xrange(1)') eq(repr(xrange(1, 2)), 'xrange(1, 2)') eq(repr(xrange(1, 2, 3)), 'xrange(1, 4, 3)') def test_nesting(self): eq = self.assertEqual # everything is meant to give up after 6 levels. eq(r([[[[[[[]]]]]]]), "[[[[[[[]]]]]]]") eq(r([[[[[[[[]]]]]]]]), "[[[[[[[...]]]]]]]") eq(r(nestedTuple(6)), "(((((((),),),),),),)") eq(r(nestedTuple(7)), "(((((((...),),),),),),)") eq(r({ nestedTuple(5) : nestedTuple(5) }), "{((((((),),),),),): ((((((),),),),),)}") eq(r({ nestedTuple(6) : nestedTuple(6) }), "{((((((...),),),),),): ((((((...),),),),),)}") eq(r([[[[[[{}]]]]]]), "[[[[[[{}]]]]]]") eq(r([[[[[[[{}]]]]]]]), "[[[[[[[...]]]]]]]") def test_buffer(self): # XXX doesn't test buffers with no b_base or read-write buffers (see # bufferobject.c). The test is fairly incomplete too. Sigh. with check_py3k_warnings(): x = buffer('foo') self.assertTrue(repr(x).startswith('<read-only buffer for 0x')) def test_cell(self): # XXX Hmm? How to get at a cell object? 
pass def test_descriptors(self): eq = self.assertEqual # method descriptors eq(repr(dict.items), "<method 'items' of 'dict' objects>") # XXX member descriptors # XXX attribute descriptors # XXX slot descriptors # static and class methods class C: def foo(cls): pass x = staticmethod(C.foo) self.assertTrue(repr(x).startswith('<staticmethod object at 0x')) x = classmethod(C.foo) self.assertTrue(repr(x).startswith('<classmethod object at 0x')) def test_unsortable(self): # Repr.repr() used to call sorted() on sets, frozensets and dicts # without taking into account that not all objects are comparable x = set([1j, 2j, 3j]) y = frozenset(x) z = {1j: 1, 2j: 2} r(x) r(y) r(z) def touch(path, text=''): fp = open(path, 'w') fp.write(text) fp.close() class LongReprTest(unittest.TestCase): def setUp(self): longname = 'areallylongpackageandmodulenametotestreprtruncation' self.pkgname = os.path.join(longname) self.subpkgname = os.path.join(longname, longname) # Make the package and subpackage shutil.rmtree(self.pkgname, ignore_errors=True) os.mkdir(self.pkgname) touch(os.path.join(self.pkgname, '__init__'+os.extsep+'py')) shutil.rmtree(self.subpkgname, ignore_errors=True) os.mkdir(self.subpkgname) touch(os.path.join(self.subpkgname, '__init__'+os.extsep+'py')) # Remember where we are self.here = os.getcwd() sys.path.insert(0, self.here) def tearDown(self): actions = [] for dirpath, dirnames, filenames in os.walk(self.pkgname): for name in dirnames + filenames: actions.append(os.path.join(dirpath, name)) actions.append(self.pkgname) actions.sort() actions.reverse() for p in actions: if os.path.isdir(p): os.rmdir(p) else: os.remove(p) del sys.path[0] def test_module(self): eq = self.assertEqual touch(os.path.join(self.subpkgname, self.pkgname + os.extsep + 'py')) from areallylongpackageandmodulenametotestreprtruncation.areallylongpackageandmodulenametotestreprtruncation import areallylongpackageandmodulenametotestreprtruncation eq(repr(areallylongpackageandmodulenametotestreprtruncation), "<module '%s' from '%s'>" % (areallylongpackageandmodulenametotestreprtruncation.__name__, areallylongpackageandmodulenametotestreprtruncation.__file__)) # XXX: Jython sys module is not a real module #eq(repr(sys), "<module 'sys' (built-in)>") def test_type(self): eq = self.assertEqual touch(os.path.join(self.subpkgname, 'foo'+os.extsep+'py'), '''\ class foo(object): pass ''') from areallylongpackageandmodulenametotestreprtruncation.areallylongpackageandmodulenametotestreprtruncation import foo eq(repr(foo.foo), "<class '%s.foo'>" % foo.__name__) def test_object(self): # XXX Test the repr of a type with a really long tp_name but with no # tp_repr. WIBNI we had ::Inline? :) pass def test_class(self): touch(os.path.join(self.subpkgname, 'bar'+os.extsep+'py'), '''\ class bar: pass ''') from areallylongpackageandmodulenametotestreprtruncation.areallylongpackageandmodulenametotestreprtruncation import bar # Module name may be prefixed with "test.", depending on how run. 
self.assertTrue(repr(bar.bar).startswith( "<class %s.bar at 0x" % bar.__name__)) def test_instance(self): touch(os.path.join(self.subpkgname, 'baz'+os.extsep+'py'), '''\ class baz: pass ''') from areallylongpackageandmodulenametotestreprtruncation.areallylongpackageandmodulenametotestreprtruncation import baz ibaz = baz.baz() self.assertTrue(repr(ibaz).startswith( "<%s.baz instance at 0x" % baz.__name__)) def test_method(self): eq = self.assertEqual touch(os.path.join(self.subpkgname, 'qux'+os.extsep+'py'), '''\ class aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa: def amethod(self): pass ''') from areallylongpackageandmodulenametotestreprtruncation.areallylongpackageandmodulenametotestreprtruncation import qux # Unbound methods first eq(repr(qux.aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.amethod), '<unbound method aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.amethod>') # Bound method next iqux = qux.aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa() self.assertTrue(repr(iqux.amethod).startswith( '<bound method aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.amethod of <%s.aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa instance at 0x' \ % (qux.__name__,) )) def test_builtin_function(self): # XXX test built-in functions and methods with really long names pass class ClassWithRepr: def __init__(self, s): self.s = s def __repr__(self): return "ClassWithLongRepr(%r)" % self.s class ClassWithFailingRepr: def __repr__(self): raise Exception("This should be caught by Repr.repr_instance") def test_main(): run_unittest(ReprTests) run_unittest(LongReprTest) if __name__ == "__main__": test_main()
apache-2.0
6,751,600,404,997,393,000
36.82622
461
0.575643
false
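The tests above exercise the truncation rules of the Python 2 repr module (sequences give up after 6 elements, dicts after 4, long strings are elided). A short hedged sketch of the same behaviour using the Python 3 name of the module, reprlib:

# Same truncation behaviour the tests above check, via the Python 3 module name.
import reprlib

print(reprlib.repr(list(range(1, 8))))        # '[1, 2, 3, 4, 5, 6, ...]'
print(reprlib.repr('a' * 30 + 'b' * 30))      # middle of the string elided with '...'

short = reprlib.Repr()
short.maxtuple = 2                            # the knob ReprTests.test_tuple tweaks
print(short.repr((1, 2, 3)))                  # '(1, 2, ...)'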
pku9104038/edx-platform
common/lib/xmodule/xmodule/videoannotation_module.py
7
6581
""" Module for Video annotations using annotator. """ from lxml import etree from pkg_resources import resource_string from xmodule.x_module import XModule from xmodule.raw_module import RawDescriptor from xblock.core import Scope, String import textwrap class AnnotatableFields(object): """ Fields for `VideoModule` and `VideoDescriptor`. """ data = String(help="XML data for the annotation", scope=Scope.content, default=textwrap.dedent("""\ <annotatable> <instructions> <p> Add the instructions to the assignment here. </p> </instructions> </annotatable> """)) display_name = String( display_name="Display Name", help="Display name for this module", scope=Scope.settings, default='Video Annotation', ) sourceurl = String(help="The external source URL for the video.", display_name="Source URL", scope=Scope.settings, default="http://video-js.zencoder.com/oceans-clip.mp4") poster_url = String(help="Poster Image URL", display_name="Poster URL", scope=Scope.settings, default="") annotation_storage_url = String(help="Location of Annotation backend", scope=Scope.settings, default="http://your_annotation_storage.com", display_name="Url for Annotation Storage") class VideoAnnotationModule(AnnotatableFields, XModule): '''Video Annotation Module''' js = {'coffee': [resource_string(__name__, 'js/src/javascript_loader.coffee'), resource_string(__name__, 'js/src/collapsible.coffee'), resource_string(__name__, 'js/src/html/display.coffee'), resource_string(__name__, 'js/src/annotatable/display.coffee') ], 'js': []} css = {'scss': [resource_string(__name__, 'css/annotatable/display.scss')]} icon_class = 'videoannotation' def __init__(self, *args, **kwargs): super(VideoAnnotationModule, self).__init__(*args, **kwargs) xmltree = etree.fromstring(self.data) self.instructions = self._extract_instructions(xmltree) self.content = etree.tostring(xmltree, encoding='unicode') self.highlight_colors = ['yellow', 'orange', 'purple', 'blue', 'green'] def _get_annotation_class_attr(self, element): """ Returns a dict with the CSS class attribute to set on the annotation and an XML key to delete from the element. """ attr = {} cls = ['annotatable-span', 'highlight'] highlight_key = 'highlight' color = element.get(highlight_key) if color is not None: if color in self.highlight_colors: cls.append('highlight-' + color) attr['_delete'] = highlight_key attr['value'] = ' '.join(cls) return {'class': attr} def _get_annotation_data_attr(self, element): """ Returns a dict in which the keys are the HTML data attributes to set on the annotation element. Each data attribute has a corresponding 'value' and (optional) '_delete' key to specify an XML attribute to delete. """ data_attrs = {} attrs_map = { 'body': 'data-comment-body', 'title': 'data-comment-title', 'problem': 'data-problem-id' } for xml_key in attrs_map.keys(): if xml_key in element.attrib: value = element.get(xml_key, '') html_key = attrs_map[xml_key] data_attrs[html_key] = {'value': value, '_delete': xml_key} return data_attrs def _render_annotation(self, element): """ Renders an annotation element for HTML output. """ attr = {} attr.update(self._get_annotation_class_attr(element)) attr.update(self._get_annotation_data_attr(element)) element.tag = 'span' for key in attr.keys(): element.set(key, attr[key]['value']) if '_delete' in attr[key] and attr[key]['_delete'] is not None: delete_key = attr[key]['_delete'] del element.attrib[delete_key] def _render_content(self): """ Renders annotatable content with annotation spans and returns HTML. 
""" xmltree = etree.fromstring(self.content) xmltree.tag = 'div' if 'display_name' in xmltree.attrib: del xmltree.attrib['display_name'] for element in xmltree.findall('.//annotation'): self._render_annotation(element) return etree.tostring(xmltree, encoding='unicode') def _extract_instructions(self, xmltree): """ Removes <instructions> from the xmltree and returns them as a string, otherwise None. """ instructions = xmltree.find('instructions') if instructions is not None: instructions.tag = 'div' xmltree.remove(instructions) return etree.tostring(instructions, encoding='unicode') return None def _get_extension(self, srcurl): ''' get the extension of a given url ''' if 'youtu' in srcurl: return 'video/youtube' else: spliturl = srcurl.split(".") extensionplus1 = spliturl[len(spliturl) - 1] spliturl = extensionplus1.split("?") extensionplus2 = spliturl[0] spliturl = extensionplus2.split("#") return 'video/' + spliturl[0] def get_html(self): """ Renders parameters to template. """ extension = self._get_extension(self.sourceurl) context = { 'display_name': self.display_name_with_default, 'instructions_html': self.instructions, 'sourceUrl': self.sourceurl, 'typeSource': extension, 'poster': self.poster_url, 'alert': self, 'content_html': self._render_content(), 'annotation_storage': self.annotation_storage_url } return self.system.render_template('videoannotation.html', context) class VideoAnnotationDescriptor(AnnotatableFields, RawDescriptor): ''' Video annotation descriptor ''' module_class = VideoAnnotationModule mako_template = "widgets/raw-edit.html" @property def non_editable_metadata_fields(self): non_editable_fields = super(VideoAnnotationDescriptor, self).non_editable_metadata_fields non_editable_fields.extend([ VideoAnnotationDescriptor.annotation_storage_url ]) return non_editable_fields
agpl-3.0
3,959,338,050,696,684,000
37.261628
185
0.605227
false
reachalpineswift/frappe-bench
frappe/config/desktop.py
8
1740
from __future__ import unicode_literals from frappe import _ def get_data(): return { "Activity": { "color": "#e67e22", "icon": "icon-play", "icon": "octicon octicon-pulse", "label": _("Activity"), "link": "activity", "type": "page" }, "Calendar": { "color": "#2980b9", "icon": "icon-calendar", "icon": "octicon octicon-calendar", "label": _("Calendar"), "link": "Calendar/Event", "type": "view" }, "Messages": { "color": "#9b59b6", "icon": "icon-comments", "icon": "octicon octicon-comment-discussion", "label": _("Messages"), "link": "messages", "type": "page" }, "To Do": { "color": "#f1c40f", "icon": "icon-check", "icon": "octicon octicon-check", "label": _("To Do"), "link": "List/ToDo", "doctype": "ToDo", "type": "list" }, "Notes": { "color": "#95a5a6", "doctype": "Note", "icon": "icon-file-alt", "icon": "octicon octicon-file-text", "label": _("Notes"), "link": "List/Note", "type": "list" }, "Website": { "color": "#16a085", "icon": "icon-globe", "icon": "octicon octicon-globe", "type": "module" }, "Installer": { "color": "#5ac8fb", "icon": "icon-download", "icon": "octicon octicon-cloud-download", "link": "applications", "type": "page", "label": _("Installer") }, "Setup": { "color": "#bdc3c7", "icon": "icon-wrench", "icon": "octicon octicon-settings", "type": "module" }, "Core": { "color": "#589494", "icon": "icon-cog", "icon": "octicon octicon-file-binary", "type": "module", "system_manager": 1 }, "Integrations": { "color": "#36414C", "icon": "octicon octicon-plug", "type": "module", "system_manager": 1 } }
mit
4,521,741,137,452,631,000
20.481481
48
0.53046
false
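A small aside on the dict literals above (not part of the Frappe code itself): several entries assign the "icon" key twice, once with a Font Awesome class and once with an octicon class. In a Python dict literal the later key silently wins, so only the octicon value survives:

entry = {
    "icon": "icon-play",
    "icon": "octicon octicon-pulse",  # duplicate key: this value is the one kept
    "type": "page",
}
assert entry["icon"] == "octicon octicon-pulse"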
PXke/invenio
invenio/legacy/bibrank/record_sorter.py
1
19472
# -*- coding: utf-8 -*- ## Ranking of records using different parameters and methods on the fly. ## ## This file is part of Invenio. ## Copyright (C) 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 CERN. ## ## Invenio is free software; you can redistribute it and/or ## modify it under the terms of the GNU General Public License as ## published by the Free Software Foundation; either version 2 of the ## License, or (at your option) any later version. ## ## Invenio is distributed in the hope that it will be useful, but ## WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ## General Public License for more details. ## ## You should have received a copy of the GNU General Public License ## along with Invenio; if not, write to the Free Software Foundation, Inc., ## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. __revision__ = "$Id$" import string import time import math import re import ConfigParser import copy from six import iteritems from invenio.config import \ CFG_SITE_LANG, \ CFG_WEBSEARCH_DEF_RECORDS_IN_GROUPS from invenio.legacy.dbquery import run_sql, deserialize_via_marshal, wash_table_column_name from invenio.ext.logging import register_exception from invenio.legacy.webpage import adderrorbox from invenio.legacy.bibindex.engine_stemmer import stem from invenio.legacy.bibindex.engine_stopwords import is_stopword from invenio.legacy.bibrank.citation_searcher import get_cited_by, get_cited_by_weight from intbitset import intbitset from invenio.legacy.bibrank.word_searcher import find_similar # Do not remove these lines, it is necessary for func_object = globals().get(function) from invenio.legacy.bibrank.word_searcher import word_similarity from invenio.legacy.miscutil.solrutils_bibrank_searcher import word_similarity_solr from invenio.legacy.miscutil.xapianutils_bibrank_searcher import word_similarity_xapian from invenio.modules.ranker.registry import configuration def compare_on_val(first, second): return cmp(second[1], first[1]) def check_term(term, col_size, term_rec, max_occ, min_occ, termlength): """Check if the tem is valid for use term - the term to check col_size - the number of records in database term_rec - the number of records which contains this term max_occ - max frequency of the term allowed min_occ - min frequence of the term allowed termlength - the minimum length of the terms allowed""" try: if is_stopword(term) or (len(term) <= termlength) or ((float(term_rec) / float(col_size)) >= max_occ) or ((float(term_rec) / float(col_size)) <= min_occ): return "" if int(term): return "" except StandardError as e: pass return "true" def create_external_ranking_settings(rank_method_code, config): methods[rank_method_code]['fields'] = dict() sections = config.sections() field_pattern = re.compile('field[0-9]+') for section in sections: if field_pattern.search(section): field_name = config.get(section, 'name') methods[rank_method_code]['fields'][field_name] = dict() for option in config.options(section): if option != 'name': create_external_ranking_option(section, option, methods[rank_method_code]['fields'][field_name], config) elif section == 'find_similar_to_recid': methods[rank_method_code][section] = dict() for option in config.options(section): create_external_ranking_option(section, option, methods[rank_method_code][section], config) elif section == 'field_settings': for option in config.options(section): create_external_ranking_option(section, option, methods[rank_method_code], config) 
def create_external_ranking_option(section, option, dictionary, config): value = config.get(section, option) if value.isdigit(): value = int(value) dictionary[option] = value def create_rnkmethod_cache(): """Create cache with vital information for each rank method.""" global methods bibrank_meths = run_sql("SELECT name from rnkMETHOD") methods = {} global voutput voutput = "" for (rank_method_code,) in bibrank_meths: try: config_file = configuration.get(rank_method_code + '.cfg', '') config = ConfigParser.ConfigParser() config.readfp(open(config_file)) except StandardError as e: pass cfg_function = config.get("rank_method", "function") if config.has_section(cfg_function): methods[rank_method_code] = {} methods[rank_method_code]["function"] = cfg_function methods[rank_method_code]["prefix"] = config.get(cfg_function, "relevance_number_output_prologue") methods[rank_method_code]["postfix"] = config.get(cfg_function, "relevance_number_output_epilogue") methods[rank_method_code]["chars_alphanumericseparators"] = r"[1234567890\!\"\#\$\%\&\'\(\)\*\+\,\-\.\/\:\;\<\=\>\?\@\[\\\]\^\_\`\{\|\}\~]" else: raise Exception("Error in configuration config_file: %s" % (config_file + ".cfg", )) i8n_names = run_sql("""SELECT ln,value from rnkMETHODNAME,rnkMETHOD where id_rnkMETHOD=rnkMETHOD.id and rnkMETHOD.name=%s""", (rank_method_code,)) for (ln, value) in i8n_names: methods[rank_method_code][ln] = value if config.has_option(cfg_function, "table"): methods[rank_method_code]["rnkWORD_table"] = config.get(cfg_function, "table") query = "SELECT count(*) FROM %sR" % wash_table_column_name(methods[rank_method_code]["rnkWORD_table"][:-1]) methods[rank_method_code]["col_size"] = run_sql(query)[0][0] if config.has_option(cfg_function, "stemming") and config.get(cfg_function, "stemming"): try: methods[rank_method_code]["stemmer"] = config.get(cfg_function, "stemming") except Exception as e: pass if config.has_option(cfg_function, "stopword"): methods[rank_method_code]["stopwords"] = config.get(cfg_function, "stopword") if config.has_section("find_similar"): methods[rank_method_code]["max_word_occurence"] = float(config.get("find_similar", "max_word_occurence")) methods[rank_method_code]["min_word_occurence"] = float(config.get("find_similar", "min_word_occurence")) methods[rank_method_code]["min_word_length"] = int(config.get("find_similar", "min_word_length")) methods[rank_method_code]["min_nr_words_docs"] = int(config.get("find_similar", "min_nr_words_docs")) methods[rank_method_code]["max_nr_words_upper"] = int(config.get("find_similar", "max_nr_words_upper")) methods[rank_method_code]["max_nr_words_lower"] = int(config.get("find_similar", "max_nr_words_lower")) methods[rank_method_code]["default_min_relevance"] = int(config.get("find_similar", "default_min_relevance")) if cfg_function in ('word_similarity_solr', 'word_similarity_xapian'): create_external_ranking_settings(rank_method_code, config) if config.has_section("combine_method"): i = 1 methods[rank_method_code]["combine_method"] = [] while config.has_option("combine_method", "method%s" % i): methods[rank_method_code]["combine_method"].append(string.split(config.get("combine_method", "method%s" % i), ",")) i += 1 def is_method_valid(colID, rank_method_code): """ Check if RANK_METHOD_CODE method is valid for the collection given. If colID is None, then check for existence regardless of collection. 
""" if colID is None: return run_sql("SELECT COUNT(*) FROM rnkMETHOD WHERE name=%s", (rank_method_code,))[0][0] enabled_colls = dict(run_sql("SELECT id_collection, score from collection_rnkMETHOD,rnkMETHOD WHERE id_rnkMETHOD=rnkMETHOD.id AND name=%s", (rank_method_code,))) try: colID = int(colID) except TypeError: return 0 if colID in enabled_colls: return 1 else: while colID: colID = run_sql("SELECT id_dad FROM collection_collection WHERE id_son=%s", (colID,)) if colID and colID[0][0] in enabled_colls: return 1 elif colID: colID = colID[0][0] return 0 def get_bibrank_methods(colID, ln=CFG_SITE_LANG): """ Return a list of rank methods enabled for collection colID and the name of them in the language defined by the ln parameter. """ if 'methods' not in globals(): create_rnkmethod_cache() avail_methods = [] for (rank_method_code, options) in iteritems(methods): if "function" in options and is_method_valid(colID, rank_method_code): if ln in options: avail_methods.append((rank_method_code, options[ln])) elif CFG_SITE_LANG in options: avail_methods.append((rank_method_code, options[CFG_SITE_LANG])) else: avail_methods.append((rank_method_code, rank_method_code)) return avail_methods def rank_records(rank_method_code, rank_limit_relevance, hitset_global, pattern=[], verbose=0, field='', rg=None, jrec=None): """rank_method_code, e.g. `jif' or `sbr' (word frequency vector model) rank_limit_relevance, e.g. `23' for `nbc' (number of citations) or `0.10' for `vec' hitset, search engine hits; pattern, search engine query or record ID (you check the type) verbose, verbose level output: list of records list of rank values prefix postfix verbose_output""" voutput = "" configcreated = "" starttime = time.time() afterfind = starttime - time.time() aftermap = starttime - time.time() try: hitset = copy.deepcopy(hitset_global) #we are receiving a global hitset if 'methods' not in globals(): create_rnkmethod_cache() function = methods[rank_method_code]["function"] #we get 'citation' method correctly here func_object = globals().get(function) if verbose > 0: voutput += "function: %s <br/> " % function voutput += "pattern: %s <br/>" % str(pattern) if func_object and pattern and pattern[0][0:6] == "recid:" and function == "word_similarity": result = find_similar(rank_method_code, pattern[0][6:], hitset, rank_limit_relevance, verbose, methods) elif rank_method_code == "citation": #we get rank_method_code correctly here. 
pattern[0] is the search word - not used by find_cit p = "" if pattern and pattern[0]: p = pattern[0][6:] result = find_citations(rank_method_code, p, hitset, verbose) elif func_object: if function == "word_similarity": result = func_object(rank_method_code, pattern, hitset, rank_limit_relevance, verbose, methods) elif function in ("word_similarity_solr", "word_similarity_xapian"): if not rg: rg = CFG_WEBSEARCH_DEF_RECORDS_IN_GROUPS if not jrec: jrec = 0 ranked_result_amount = rg + jrec if verbose > 0: voutput += "Ranked result amount: %s<br/><br/>" % ranked_result_amount if verbose > 0: voutput += "field: %s<br/>" % field if function == "word_similarity_solr": if verbose > 0: voutput += "In Solr part:<br/>" result = word_similarity_solr(pattern, hitset, methods[rank_method_code], verbose, field, ranked_result_amount) if function == "word_similarity_xapian": if verbose > 0: voutput += "In Xapian part:<br/>" result = word_similarity_xapian(pattern, hitset, methods[rank_method_code], verbose, field, ranked_result_amount) else: result = func_object(rank_method_code, pattern, hitset, rank_limit_relevance, verbose) else: result = rank_by_method(rank_method_code, pattern, hitset, rank_limit_relevance, verbose) except Exception as e: register_exception() result = (None, "", adderrorbox("An error occured when trying to rank the search result "+rank_method_code, ["Unexpected error: %s<br />" % (e,)]), voutput) afterfind = time.time() - starttime if result[0] and result[1]: #split into two lists for search_engine results_similar_recIDs = map(lambda x: x[0], result[0]) results_similar_relevances = map(lambda x: x[1], result[0]) result = (results_similar_recIDs, results_similar_relevances, result[1], result[2], "%s" % configcreated + result[3]) aftermap = time.time() - starttime; else: result = (None, None, result[1], result[2], result[3]) #add stuff from here into voutput from result tmp = voutput+result[4] if verbose > 0: tmp += "<br/>Elapsed time after finding: "+str(afterfind)+"\nElapsed after mapping: "+str(aftermap) result = (result[0],result[1],result[2],result[3],tmp) #dbg = string.join(map(str,methods[rank_method_code].items())) #result = (None, "", adderrorbox("Debug ",rank_method_code+" "+dbg),"",voutput); return result def combine_method(rank_method_code, pattern, hitset, rank_limit_relevance,verbose): """combining several methods into one based on methods/percentage in config file""" global voutput result = {} try: for (method, percent) in methods[rank_method_code]["combine_method"]: function = methods[method]["function"] func_object = globals().get(function) percent = int(percent) if func_object: this_result = func_object(method, pattern, hitset, rank_limit_relevance, verbose)[0] else: this_result = rank_by_method(method, pattern, hitset, rank_limit_relevance, verbose)[0] for i in range(0, len(this_result)): (recID, value) = this_result[i] if value > 0: result[recID] = result.get(recID, 0) + int((float(i) / len(this_result)) * float(percent)) result = result.items() result.sort(lambda x, y: cmp(x[1], y[1])) return (result, "(", ")", voutput) except Exception as e: return (None, "Warning: %s method cannot be used for ranking your query." % rank_method_code, "", voutput) def rank_by_method(rank_method_code, lwords, hitset, rank_limit_relevance,verbose): """Ranking of records based on predetermined values. 
input: rank_method_code - the code of the method, from the name field in rnkMETHOD, used to get predetermined values from rnkMETHODDATA lwords - a list of words from the query hitset - a list of hits for the query found by search_engine rank_limit_relevance - show only records with a rank value above this verbose - verbose value output: reclist - a list of sorted records, with unsorted added to the end: [[23,34], [344,24], [1,01]] prefix - what to show before the rank value postfix - what to show after the rank value voutput - contains extra information, content dependent on verbose value""" global voutput voutput = "" rnkdict = run_sql("SELECT relevance_data FROM rnkMETHODDATA,rnkMETHOD where rnkMETHOD.id=id_rnkMETHOD and rnkMETHOD.name=%s", (rank_method_code,)) if not rnkdict: return (None, "Warning: Could not load ranking data for method %s." % rank_method_code, "", voutput) max_recid = 0 res = run_sql("SELECT max(id) FROM bibrec") if res and res[0][0]: max_recid = int(res[0][0]) lwords_hitset = None for j in range(0, len(lwords)): #find which docs to search based on ranges..should be done in search_engine... if lwords[j] and lwords[j][:6] == "recid:": if not lwords_hitset: lwords_hitset = intbitset() lword = lwords[j][6:] if string.find(lword, "->") > -1: lword = string.split(lword, "->") if int(lword[0]) >= max_recid or int(lword[1]) >= max_recid + 1: return (None, "Warning: Given record IDs are out of range.", "", voutput) for i in range(int(lword[0]), int(lword[1])): lwords_hitset.add(int(i)) elif lword < max_recid + 1: lwords_hitset.add(int(lword)) else: return (None, "Warning: Given record IDs are out of range.", "", voutput) rnkdict = deserialize_via_marshal(rnkdict[0][0]) if verbose > 0: voutput += "<br />Running rank method: %s, using rank_by_method function in bibrank_record_sorter<br />" % rank_method_code voutput += "Ranking data loaded, size of structure: %s<br />" % len(rnkdict) lrecIDs = list(hitset) if verbose > 0: voutput += "Number of records to rank: %s<br />" % len(lrecIDs) reclist = [] reclist_addend = [] if not lwords_hitset: #rank all docs, can this be speed up using something else than for loop? for recID in lrecIDs: if recID in rnkdict: reclist.append((recID, rnkdict[recID])) del rnkdict[recID] else: reclist_addend.append((recID, 0)) else: #rank docs in hitset, can this be speed up using something else than for loop? for recID in lwords_hitset: if recID in rnkdict and recID in hitset: reclist.append((recID, rnkdict[recID])) del rnkdict[recID] elif recID in hitset: reclist_addend.append((recID, 0)) if verbose > 0: voutput += "Number of records ranked: %s<br />" % len(reclist) voutput += "Number of records not ranked: %s<br />" % len(reclist_addend) reclist.sort(lambda x, y: cmp(x[1], y[1])) return (reclist_addend + reclist, methods[rank_method_code]["prefix"], methods[rank_method_code]["postfix"], voutput) def find_citations(rank_method_code, recID, hitset, verbose): """Rank by the amount of citations.""" #calculate the cited-by values for all the members of the hitset #returns: ((recordid,weight),prefix,postfix,message) global voutput voutput = "" #If the recID is numeric, return only stuff that cites it. 
Otherwise return #stuff that cites hitset #try to convert to int recisint = True recidint = 0 try: recidint = int(recID) except: recisint = False ret = [] if recisint: myrecords = get_cited_by(recidint) #this is a simple list ret = get_cited_by_weight(myrecords) else: ret = get_cited_by_weight(hitset) ret.sort(lambda x,y:cmp(x[1],y[1])) #ascending by the second member of the tuples if verbose > 0: voutput = voutput+"\nrecID "+str(recID)+" is int: "+str(recisint)+" hitset "+str(hitset)+"\n"+"find_citations retlist "+str(ret) #voutput = voutput + str(ret) if ret: return (ret,"(", ")", "") else: return ((),"", "", "")
gpl-2.0
-2,718,542,281,756,288,500
42.954853
165
0.622381
false
alexgibson/bedrock
lib/fluent_migrations/firefox/retention/thank-you.py
4
4312
from __future__ import absolute_import import fluent.syntax.ast as FTL from fluent.migrate.helpers import transforms_from from fluent.migrate.helpers import VARIABLE_REFERENCE, TERM_REFERENCE from fluent.migrate import REPLACE, COPY thank_you = "firefox/retention/thank-you.lang" def migrate(ctx): """Migrate bedrock/firefox/templates/firefox/retention/thank-you.html, part {index}.""" ctx.add_transforms( "firefox/retention/thank-you.ftl", "firefox/retention/thank-you.ftl", transforms_from(""" thank-you-thank-you = {COPY(thank_you, "Thank You",)} thank-you-thank-you-page = {COPY(thank_you, "Thank you page.",)} thank-you-its-all-thanks-to-you = {COPY(thank_you, "It’s all thanks to you",)} """, thank_you=thank_you) + [ FTL.Message( id=FTL.Identifier("thank-you-choosing-firefox-helps"), value=REPLACE( thank_you, "Choosing Firefox helps Mozilla make the Internet a better place. Here’s what you can do next:", { "Mozilla": TERM_REFERENCE("brand-name-mozilla"), "Firefox": TERM_REFERENCE("brand-name-firefox"), } ) ), FTL.Message( id=FTL.Identifier("thank-you-make-firefox-your-default"), value=REPLACE( thank_you, "Make Firefox your default browser", { "Firefox": TERM_REFERENCE("brand-name-firefox"), } ) ), ] + transforms_from(""" thank-you-1-min-action = {COPY(thank_you, "1 min action",)} thank-you-set-as-your-default = {COPY(thank_you, "Set as your default",)} """, thank_you=thank_you) + [ FTL.Message( id=FTL.Identifier("thank-you-get-firefox-on-your-phone"), value=REPLACE( thank_you, "Get Firefox on your phone", { "Firefox": TERM_REFERENCE("brand-name-firefox"), } ) ), ] + transforms_from(""" thank-you-2-min-install = {COPY(thank_you, "2 min install",)} """, thank_you=thank_you) + [ FTL.Message( id=FTL.Identifier("thank-you-download-firefox"), value=REPLACE( thank_you, "Download Firefox", { "Firefox": TERM_REFERENCE("brand-name-firefox"), } ) ), FTL.Message( id=FTL.Identifier("thank-you-tell-your-friends-about"), value=REPLACE( thank_you, "Tell your friends about Firefox", { "Firefox": TERM_REFERENCE("brand-name-firefox"), } ) ), ] + transforms_from(""" thank-you-1-min-share = {COPY(thank_you, "1 min share",)} """, thank_you=thank_you) + [ FTL.Message( id=FTL.Identifier("thank-you-join-me-in-the-fight-for"), value=REPLACE( thank_you, "Join me in the fight for an open web by choosing Firefox!", { "Firefox": TERM_REFERENCE("brand-name-firefox"), } ) ), ] + transforms_from(""" thank-you-send-a-tweet = {COPY(thank_you, "Send a tweet",)} thank-you-make-the-internet-a-safer = {COPY(thank_you, "Make the Internet a safer place",)} thank-you-4-min-read = {COPY(thank_you, "4 min read",)} thank-you-stay-in-touch-for-more = {COPY(thank_you, "Stay in touch for more cool stuff",)} """, thank_you=thank_you) + [ FTL.Message( id=FTL.Identifier("thank-you-get-the-latest-greatest"), value=REPLACE( thank_you, "Get the latest & greatest from Firefox delivered straight to your inbox.", { "Firefox": TERM_REFERENCE("brand-name-firefox"), } ) ), ] )
mpl-2.0
4,757,993,256,762,706,000
38.888889
116
0.490019
false
clouserw/zamboni
mkt/submit/tests/test_forms.py
12
14318
from django.forms.fields import BooleanField from django.test.client import RequestFactory from django.utils.safestring import SafeText from django.utils.translation import ugettext_lazy as _ import mock from nose.tools import eq_, ok_ import mkt import mkt.site.tests from mkt.comm.models import CommunicationNote from mkt.constants.features import APP_FEATURES from mkt.developers.models import AppLog from mkt.files.models import FileUpload from mkt.reviewers.models import RereviewQueue from mkt.site.fixtures import fixture from mkt.site.tests import user_factory from mkt.submit import forms from mkt.users.models import UserProfile from mkt.webapps.models import AppFeatures, Webapp class TestNewWebappForm(mkt.site.tests.TestCase): def setUp(self): self.request = RequestFactory().get('/') self.request.user = user_factory() self.file = FileUpload.objects.create(valid=True) self.file.user = self.request.user self.file.save() def test_no_user(self): self.file.user = None self.file.save() form = forms.NewWebappForm({'free_platforms': ['free-firefoxos'], 'upload': self.file.uuid}, request=self.request) assert not form.is_valid() eq_(form.ERRORS['user'], form.errors['free_platforms']) eq_(form.ERRORS['user'], form.errors['paid_platforms']) def test_correct_user(self): form = forms.NewWebappForm({'free_platforms': ['free-firefoxos'], 'upload': self.file.uuid}, request=self.request) assert form.is_valid(), form.errors def test_incorrect_user(self): self.file.user = user_factory() self.file.save() form = forms.NewWebappForm({'upload': self.file.uuid}, request=self.request) assert not form.is_valid() eq_(form.ERRORS['user'], form.errors['free_platforms']) eq_(form.ERRORS['user'], form.errors['paid_platforms']) def test_not_free_or_paid(self): form = forms.NewWebappForm({}) assert not form.is_valid() eq_(form.ERRORS['none'], form.errors['free_platforms']) eq_(form.ERRORS['none'], form.errors['paid_platforms']) def test_paid(self): form = forms.NewWebappForm({'paid_platforms': ['paid-firefoxos'], 'upload': self.file.uuid}, request=self.request) assert form.is_valid() eq_(form.get_paid(), mkt.ADDON_PREMIUM) def test_free(self): form = forms.NewWebappForm({'free_platforms': ['free-firefoxos'], 'upload': self.file.uuid}) assert form.is_valid() eq_(form.get_paid(), mkt.ADDON_FREE) def test_platform(self): mappings = ( ({'free_platforms': ['free-firefoxos']}, [mkt.DEVICE_GAIA]), ({'paid_platforms': ['paid-firefoxos']}, [mkt.DEVICE_GAIA]), ({'free_platforms': ['free-firefoxos', 'free-android-mobile']}, [mkt.DEVICE_GAIA, mkt.DEVICE_MOBILE]), ({'free_platforms': ['free-android-mobile', 'free-android-tablet']}, [mkt.DEVICE_MOBILE, mkt.DEVICE_TABLET]), ) for data, res in mappings: data['upload'] = self.file.uuid form = forms.NewWebappForm(data) assert form.is_valid(), form.errors self.assertSetEqual(res, form.get_devices()) def test_both(self): form = forms.NewWebappForm({'paid_platforms': ['paid-firefoxos'], 'free_platforms': ['free-firefoxos']}, request=self.request) assert not form.is_valid() eq_(form.ERRORS['both'], form.errors['free_platforms']) eq_(form.ERRORS['both'], form.errors['paid_platforms']) def test_multiple(self): form = forms.NewWebappForm({'free_platforms': ['free-firefoxos', 'free-desktop'], 'upload': self.file.uuid}) assert form.is_valid() def test_not_packaged(self): form = forms.NewWebappForm({'free_platforms': ['free-firefoxos'], 'upload': self.file.uuid}) assert form.is_valid(), form.errors assert not form.is_packaged() @mock.patch('mkt.submit.forms.parse_addon', lambda *args: {'version': 
None}) def test_packaged_allowed_everywhere(self): for device in ('free-firefoxos', 'free-desktop', 'free-android-tablet', 'free-android-mobile'): form = forms.NewWebappForm({'free_platforms': [device], 'upload': self.file.uuid, 'packaged': True}, request=self.request) assert form.is_valid(), form.errors assert form.is_packaged() class TestNewWebappVersionForm(mkt.site.tests.TestCase): def setUp(self): self.request = RequestFactory().get('/') self.file = FileUpload.objects.create(valid=True) def test_no_upload(self): form = forms.NewWebappVersionForm(request=self.request, is_packaged=True) assert not form.is_valid(), form.errors @mock.patch('mkt.submit.forms.parse_addon', lambda *args: {"origin": "app://hy.fr"}) @mock.patch('mkt.submit.forms.verify_app_domain') def test_verify_app_domain_called(self, _verify): self.create_switch('webapps-unique-by-domain') form = forms.NewWebappVersionForm({'upload': self.file.uuid}, request=self.request, is_packaged=True) assert form.is_valid(), form.errors assert _verify.called @mock.patch('mkt.submit.forms.parse_addon', lambda *args: {"origin": "app://hy.fr"}) def test_verify_app_domain_exclude_same(self): app = mkt.site.tests.app_factory(app_domain='app://hy.fr') form = forms.NewWebappVersionForm( {'upload': self.file.uuid}, request=self.request, is_packaged=True, addon=app) assert form.is_valid(), form.errors @mock.patch('mkt.submit.forms.parse_addon', lambda *args: {"origin": "app://hy.fr"}) def test_verify_app_domain_exclude_different(self): app = mkt.site.tests.app_factory(app_domain='app://yo.lo') mkt.site.tests.app_factory(app_domain='app://hy.fr') form = forms.NewWebappVersionForm( {'upload': self.file.uuid}, request=self.request, is_packaged=True, addon=app) assert not form.is_valid(), form.errors assert ('An app already exists on this domain; ' 'only one app per domain is allowed.' 
in form.errors['upload']) class TestAppDetailsBasicForm(mkt.site.tests.TestCase): fixtures = fixture('user_999', 'webapp_337141') def setUp(self): self.request = mock.Mock() self.request.user = UserProfile.objects.get(id=999) self.request.groups = () def get_app(self): return Webapp.objects.get(pk=337141) def get_data(self, **kwargs): default = { 'app_slug': 'thisIsAslug', 'description': '...', 'privacy_policy': '...', 'support_email': '[email protected]', 'notes': '', 'publish_type': mkt.PUBLISH_IMMEDIATE, } default.update(kwargs) return default def test_slug(self): app = self.get_app() form = forms.AppDetailsBasicForm(self.get_data(), request=self.request, instance=app) assert form.is_valid(), form.errors form.save() eq_(app.app_slug, 'thisisaslug') def test_comm_thread(self): app = self.get_app() note_body = 'please approve this app' form = forms.AppDetailsBasicForm(self.get_data(notes=note_body), request=self.request, instance=app) assert form.is_valid(), form.errors form.save() notes = CommunicationNote.objects.all() eq_(notes.count(), 1) eq_(notes[0].body, note_body) def test_publish_type(self): app = self.get_app() form = forms.AppDetailsBasicForm( self.get_data(publish_type=mkt.PUBLISH_PRIVATE), request=self.request, instance=app) assert form.is_valid(), form.errors form.save() eq_(app.publish_type, mkt.PUBLISH_PRIVATE) def test_help_text_uses_safetext_and_includes_url(self): app = self.get_app() form = forms.AppDetailsBasicForm( self.get_data(publish_type=mkt.PUBLISH_PRIVATE), request=self.request, instance=app) help_text = form.base_fields['privacy_policy'].help_text eq_(type(help_text), SafeText) ok_('{url}' not in help_text) ok_(form.PRIVACY_MDN_URL in help_text) def test_is_offline_guess_false(self): app = self.get_app() app.guess_is_offline = lambda: False assert not app.is_offline forms.AppDetailsBasicForm( self.get_data(), request=self.request, instance=app) assert not app.is_offline def test_is_offline_guess_false_override(self): app = self.get_app() app.guess_is_offline = lambda: False form = forms.AppDetailsBasicForm( self.get_data(is_offline=True), request=self.request, instance=app) assert form.is_valid(), form.errors form.save() eq_(app.is_offline, True) def test_is_offline_guess_true(self): app = self.get_app() app.guess_is_offline = lambda: True assert not app.is_offline forms.AppDetailsBasicForm( self.get_data(is_offline=None), request=self.request, instance=app) assert app.is_offline def test_is_offline_guess_true_override(self): app = self.get_app() app.guess_is_offline = lambda: True form = forms.AppDetailsBasicForm( self.get_data(is_offline=False), request=self.request, instance=app) assert form.is_valid(), form.errors form.save() eq_(app.is_offline, False) def test_tags(self): app = self.get_app() form = forms.AppDetailsBasicForm( self.get_data(tags='card games, poker'), request=self.request, instance=app) assert form.is_valid(), form.errors form.save() eq_(app.tags.count(), 2) self.assertSetEqual( app.tags.values_list('tag_text', flat=True), ['card games', 'poker']) class TestAppFeaturesForm(mkt.site.tests.TestCase): fixtures = fixture('user_999', 'webapp_337141') def setUp(self): mkt.set_user(UserProfile.objects.all()[0]) self.form = forms.AppFeaturesForm() self.app = Webapp.objects.get(pk=337141) self.features = self.app.current_version.features def _check_log(self, action): assert AppLog.objects.filter( addon=self.app, activity_log__action=action.id).exists(), ( "Didn't find `%s` action in logs." 
% action.short) def test_required(self): f_names = self.form.fields.keys() for value in (True, False): form = forms.AppFeaturesForm(dict((n, value) for n in f_names)) eq_(form.is_valid(), True, form.errors) def test_correct_fields(self): fields = self.form.fields f_values = fields.values() assert 'version' not in fields assert all(isinstance(f, BooleanField) for f in f_values) self.assertSetEqual(fields, AppFeatures()._fields()) def test_required_api_fields(self): fields = [f.help_text for f in self.form.required_api_fields()] eq_(fields, sorted(f['name'] for f in APP_FEATURES.values())) def test_required_api_fields_nonascii(self): forms.AppFeaturesForm.base_fields['has_apps'].help_text = _( u'H\xe9llo') fields = [f.help_text for f in self.form.required_api_fields()] eq_(fields, sorted(f['name'] for f in APP_FEATURES.values())) def test_changes_mark_for_rereview(self): self.features.update(has_sms=True) data = {'has_apps': True} self.form = forms.AppFeaturesForm(instance=self.features, data=data) self.form.save() ok_(self.features.has_apps) ok_(not self.features.has_sms) ok_(not self.features.has_contacts) action_id = mkt.LOG.REREVIEW_FEATURES_CHANGED.id assert AppLog.objects.filter(addon=self.app, activity_log__action=action_id).exists() eq_(RereviewQueue.objects.count(), 1) def test_no_changes_not_marked_for_rereview(self): self.features.update(has_sms=True) data = {'has_sms': True} self.form = forms.AppFeaturesForm(instance=self.features, data=data) self.form.save() ok_(not self.features.has_apps) ok_(self.features.has_sms) eq_(RereviewQueue.objects.count(), 0) action_id = mkt.LOG.REREVIEW_FEATURES_CHANGED.id assert not AppLog.objects.filter( addon=self.app, activity_log__action=action_id).exists() def test_changes_mark_for_rereview_bypass(self): self.features.update(has_sms=True) data = {'has_apps': True} self.form = forms.AppFeaturesForm(instance=self.features, data=data) self.form.save(mark_for_rereview=False) ok_(self.features.has_apps) ok_(not self.features.has_sms) eq_(RereviewQueue.objects.count(), 0) action_id = mkt.LOG.REREVIEW_FEATURES_CHANGED.id assert not AppLog.objects.filter( addon=self.app, activity_log__action=action_id).exists()
bsd-3-clause
361,190,859,090,516,400
38.443526
79
0.578223
false
kemia/kemia
scripts/addnamespaces.py
1
1486
import os, sys

namespaces = '''
var sys = require('sys');
goog={};
goog.array={};
goog.array.forEach={};
goog.debug={};
goog.events={};
goog.events.pools={};
goog.string={};
goog.userAgent={};
goog.userAgent.jscript={};
goog.userAgent.product={};
goog.structs={};
goog.object={};
goog.iter={};
goog.math={};
goog.graphics={};
goog.graphics.VmlGraphics={};
goog.dom={};
goog.dom.classes={};
goog.dom.a11y={};
goog.style={};
goog.ui={};
goog.ui.registry={};
goog.fx={};
goog.editor={};
goog.editor.defines={};
goog.async={};
goog.functions={};
goog.reflect={};
goog.color={};
goog.positioning={};
goog.json={};
goog.asserts={};
goog.i18n={};
kemia={};
kemia.controller={};
kemia.controller.ToolbarFactory={};
kemia.controller.DefaultToolbar={};
kemia.controller.plugins={};
kemia.view={};
kemia.math={};
kemia.graphics={};
kemia.resource={};
kemia.model={};
kemia.ring={};
kemia.ring.Hanser={};
kemia.ring.SSSR={};
kemia.query={};
kemia.io={};
kemia.io.json={};
kemia.io.mdl={};
kemia.io.smiles={};
kemia.query={};
var document=this;
'''

if len(sys.argv) < 2:
    print 'Usage: addnamespaces.py file'
else:
    inFilename = sys.argv[1]
    input = open(inFilename)
    outFilename = inFilename
    outFilename.replace('.js', '.node.js')
    if len(sys.argv) > 2:
        outFilename = sys.argv[2]
    output = open(outFilename, 'w')
    isFirstLine = True
    for s in input.xreadlines():
        if (isFirstLine):
            output.write(s.replace('var goog=goog||{};', namespaces))
        else:
            output.write(s)
apache-2.0
-2,841,521,769,973,920,300
18.552632
63
0.652086
false
ageron/tensorflow
tensorflow/contrib/seq2seq/python/ops/helper.py
11
28495
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """A library of helpers for use with SamplingDecoders. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import abc import six from tensorflow.contrib.seq2seq.python.ops import decoder from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import embedding_ops from tensorflow.python.ops import gen_array_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import random_ops from tensorflow.python.ops import tensor_array_ops from tensorflow.python.util import nest __all__ = [ "Helper", "TrainingHelper", "GreedyEmbeddingHelper", "SampleEmbeddingHelper", "CustomHelper", "ScheduledEmbeddingTrainingHelper", "ScheduledOutputTrainingHelper", "InferenceHelper", ] _transpose_batch_time = decoder._transpose_batch_time # pylint: disable=protected-access # The following sample functions (_call_sampler, bernoulli_sample, # categorical_sample) mimic TensorFlow Probability distribution semantics. def _call_sampler(sample_n_fn, sample_shape, name=None): """Reshapes vector of samples.""" with ops.name_scope(name, "call_sampler", values=[sample_shape]): sample_shape = ops.convert_to_tensor( sample_shape, dtype=dtypes.int32, name="sample_shape") # Ensure sample_shape is a vector (vs just a scalar). 
pad = math_ops.cast(math_ops.equal(array_ops.rank(sample_shape), 0), dtypes.int32) sample_shape = array_ops.reshape( sample_shape, array_ops.pad(array_ops.shape(sample_shape), paddings=[[pad, 0]], constant_values=1)) samples = sample_n_fn(math_ops.reduce_prod(sample_shape)) batch_event_shape = array_ops.shape(samples)[1:] final_shape = array_ops.concat([sample_shape, batch_event_shape], 0) return array_ops.reshape(samples, final_shape) def bernoulli_sample(probs=None, logits=None, dtype=dtypes.int32, sample_shape=(), seed=None): """Samples from Bernoulli distribution.""" if probs is None: probs = math_ops.sigmoid(logits, name="probs") else: probs = ops.convert_to_tensor(probs, name="probs") batch_shape_tensor = array_ops.shape(probs) def _sample_n(n): """Sample vector of Bernoullis.""" new_shape = array_ops.concat([[n], batch_shape_tensor], 0) uniform = random_ops.random_uniform( new_shape, seed=seed, dtype=probs.dtype) return math_ops.cast(math_ops.less(uniform, probs), dtype) return _call_sampler(_sample_n, sample_shape) def categorical_sample(logits, dtype=dtypes.int32, sample_shape=(), seed=None): """Samples from categorical distribution.""" logits = ops.convert_to_tensor(logits, name="logits") event_size = array_ops.shape(logits)[-1] batch_shape_tensor = array_ops.shape(logits)[:-1] def _sample_n(n): """Sample vector of categoricals.""" if logits.shape.ndims == 2: logits_2d = logits else: logits_2d = array_ops.reshape(logits, [-1, event_size]) sample_dtype = dtypes.int64 if logits.dtype.size > 4 else dtypes.int32 draws = random_ops.multinomial( logits_2d, n, seed=seed, output_dtype=sample_dtype) draws = array_ops.reshape( array_ops.transpose(draws), array_ops.concat([[n], batch_shape_tensor], 0)) return math_ops.cast(draws, dtype) return _call_sampler(_sample_n, sample_shape) def _unstack_ta(inp): return tensor_array_ops.TensorArray( dtype=inp.dtype, size=array_ops.shape(inp)[0], element_shape=inp.get_shape()[1:]).unstack(inp) @six.add_metaclass(abc.ABCMeta) class Helper(object): """Interface for implementing sampling in seq2seq decoders. Helper instances are used by `BasicDecoder`. """ @abc.abstractproperty def batch_size(self): """Batch size of tensor returned by `sample`. Returns a scalar int32 tensor. """ raise NotImplementedError("batch_size has not been implemented") @abc.abstractproperty def sample_ids_shape(self): """Shape of tensor returned by `sample`, excluding the batch dimension. Returns a `TensorShape`. """ raise NotImplementedError("sample_ids_shape has not been implemented") @abc.abstractproperty def sample_ids_dtype(self): """DType of tensor returned by `sample`. Returns a DType. """ raise NotImplementedError("sample_ids_dtype has not been implemented") @abc.abstractmethod def initialize(self, name=None): """Returns `(initial_finished, initial_inputs)`.""" pass @abc.abstractmethod def sample(self, time, outputs, state, name=None): """Returns `sample_ids`.""" pass @abc.abstractmethod def next_inputs(self, time, outputs, state, sample_ids, name=None): """Returns `(finished, next_inputs, next_state)`.""" pass class CustomHelper(Helper): """Base abstract class that allows the user to customize sampling.""" def __init__(self, initialize_fn, sample_fn, next_inputs_fn, sample_ids_shape=None, sample_ids_dtype=None): """Initializer. Args: initialize_fn: callable that returns `(finished, next_inputs)` for the first iteration. sample_fn: callable that takes `(time, outputs, state)` and emits tensor `sample_ids`. 
next_inputs_fn: callable that takes `(time, outputs, state, sample_ids)` and emits `(finished, next_inputs, next_state)`. sample_ids_shape: Either a list of integers, or a 1-D Tensor of type `int32`, the shape of each value in the `sample_ids` batch. Defaults to a scalar. sample_ids_dtype: The dtype of the `sample_ids` tensor. Defaults to int32. """ self._initialize_fn = initialize_fn self._sample_fn = sample_fn self._next_inputs_fn = next_inputs_fn self._batch_size = None self._sample_ids_shape = tensor_shape.TensorShape(sample_ids_shape or []) self._sample_ids_dtype = sample_ids_dtype or dtypes.int32 @property def batch_size(self): if self._batch_size is None: raise ValueError("batch_size accessed before initialize was called") return self._batch_size @property def sample_ids_shape(self): return self._sample_ids_shape @property def sample_ids_dtype(self): return self._sample_ids_dtype def initialize(self, name=None): with ops.name_scope(name, "%sInitialize" % type(self).__name__): (finished, next_inputs) = self._initialize_fn() if self._batch_size is None: self._batch_size = array_ops.size(finished) return (finished, next_inputs) def sample(self, time, outputs, state, name=None): with ops.name_scope( name, "%sSample" % type(self).__name__, (time, outputs, state)): return self._sample_fn(time=time, outputs=outputs, state=state) def next_inputs(self, time, outputs, state, sample_ids, name=None): with ops.name_scope( name, "%sNextInputs" % type(self).__name__, (time, outputs, state)): return self._next_inputs_fn( time=time, outputs=outputs, state=state, sample_ids=sample_ids) class TrainingHelper(Helper): """A helper for use during training. Only reads inputs. Returned sample_ids are the argmax of the RNN output logits. """ def __init__(self, inputs, sequence_length, time_major=False, name=None): """Initializer. Args: inputs: A (structure of) input tensors. sequence_length: An int32 vector tensor. time_major: Python bool. Whether the tensors in `inputs` are time major. If `False` (default), they are assumed to be batch major. name: Name scope for any created operations. Raises: ValueError: if `sequence_length` is not a 1D tensor. 
""" with ops.name_scope(name, "TrainingHelper", [inputs, sequence_length]): inputs = ops.convert_to_tensor(inputs, name="inputs") self._inputs = inputs if not time_major: inputs = nest.map_structure(_transpose_batch_time, inputs) self._input_tas = nest.map_structure(_unstack_ta, inputs) self._sequence_length = ops.convert_to_tensor( sequence_length, name="sequence_length") if self._sequence_length.get_shape().ndims != 1: raise ValueError( "Expected sequence_length to be a vector, but received shape: %s" % self._sequence_length.get_shape()) self._zero_inputs = nest.map_structure( lambda inp: array_ops.zeros_like(inp[0, :]), inputs) self._batch_size = array_ops.size(sequence_length) @property def inputs(self): return self._inputs @property def sequence_length(self): return self._sequence_length @property def batch_size(self): return self._batch_size @property def sample_ids_shape(self): return tensor_shape.TensorShape([]) @property def sample_ids_dtype(self): return dtypes.int32 def initialize(self, name=None): with ops.name_scope(name, "TrainingHelperInitialize"): finished = math_ops.equal(0, self._sequence_length) all_finished = math_ops.reduce_all(finished) next_inputs = control_flow_ops.cond( all_finished, lambda: self._zero_inputs, lambda: nest.map_structure(lambda inp: inp.read(0), self._input_tas)) return (finished, next_inputs) def sample(self, time, outputs, name=None, **unused_kwargs): with ops.name_scope(name, "TrainingHelperSample", [time, outputs]): sample_ids = math_ops.cast( math_ops.argmax(outputs, axis=-1), dtypes.int32) return sample_ids def next_inputs(self, time, outputs, state, name=None, **unused_kwargs): """next_inputs_fn for TrainingHelper.""" with ops.name_scope(name, "TrainingHelperNextInputs", [time, outputs, state]): next_time = time + 1 finished = (next_time >= self._sequence_length) all_finished = math_ops.reduce_all(finished) def read_from_ta(inp): return inp.read(next_time) next_inputs = control_flow_ops.cond( all_finished, lambda: self._zero_inputs, lambda: nest.map_structure(read_from_ta, self._input_tas)) return (finished, next_inputs, state) class ScheduledEmbeddingTrainingHelper(TrainingHelper): """A training helper that adds scheduled sampling. Returns -1s for sample_ids where no sampling took place; valid sample id values elsewhere. """ def __init__(self, inputs, sequence_length, embedding, sampling_probability, time_major=False, seed=None, scheduling_seed=None, name=None): """Initializer. Args: inputs: A (structure of) input tensors. sequence_length: An int32 vector tensor. embedding: A callable that takes a vector tensor of `ids` (argmax ids), or the `params` argument for `embedding_lookup`. sampling_probability: A 0D `float32` tensor: the probability of sampling categorically from the output ids instead of reading directly from the inputs. time_major: Python bool. Whether the tensors in `inputs` are time major. If `False` (default), they are assumed to be batch major. seed: The sampling seed. scheduling_seed: The schedule decision rule sampling seed. name: Name scope for any created operations. Raises: ValueError: if `sampling_probability` is not a scalar or vector. 
""" with ops.name_scope(name, "ScheduledEmbeddingSamplingWrapper", [embedding, sampling_probability]): if callable(embedding): self._embedding_fn = embedding else: self._embedding_fn = ( lambda ids: embedding_ops.embedding_lookup(embedding, ids)) self._sampling_probability = ops.convert_to_tensor( sampling_probability, name="sampling_probability") if self._sampling_probability.get_shape().ndims not in (0, 1): raise ValueError( "sampling_probability must be either a scalar or a vector. " "saw shape: %s" % (self._sampling_probability.get_shape())) self._seed = seed self._scheduling_seed = scheduling_seed super(ScheduledEmbeddingTrainingHelper, self).__init__( inputs=inputs, sequence_length=sequence_length, time_major=time_major, name=name) def initialize(self, name=None): return super(ScheduledEmbeddingTrainingHelper, self).initialize(name=name) def sample(self, time, outputs, state, name=None): with ops.name_scope(name, "ScheduledEmbeddingTrainingHelperSample", [time, outputs, state]): # Return -1s where we did not sample, and sample_ids elsewhere select_sample = bernoulli_sample( probs=self._sampling_probability, dtype=dtypes.bool, sample_shape=self.batch_size, seed=self._scheduling_seed) return array_ops.where( select_sample, categorical_sample(logits=outputs, seed=self._seed), gen_array_ops.fill([self.batch_size], -1)) def next_inputs(self, time, outputs, state, sample_ids, name=None): with ops.name_scope(name, "ScheduledEmbeddingTrainingHelperNextInputs", [time, outputs, state, sample_ids]): (finished, base_next_inputs, state) = ( super(ScheduledEmbeddingTrainingHelper, self).next_inputs( time=time, outputs=outputs, state=state, sample_ids=sample_ids, name=name)) def maybe_sample(): """Perform scheduled sampling.""" where_sampling = math_ops.cast( array_ops.where(sample_ids > -1), dtypes.int32) where_not_sampling = math_ops.cast( array_ops.where(sample_ids <= -1), dtypes.int32) sample_ids_sampling = array_ops.gather_nd(sample_ids, where_sampling) inputs_not_sampling = array_ops.gather_nd( base_next_inputs, where_not_sampling) sampled_next_inputs = self._embedding_fn(sample_ids_sampling) base_shape = array_ops.shape(base_next_inputs) return (array_ops.scatter_nd(indices=where_sampling, updates=sampled_next_inputs, shape=base_shape) + array_ops.scatter_nd(indices=where_not_sampling, updates=inputs_not_sampling, shape=base_shape)) all_finished = math_ops.reduce_all(finished) next_inputs = control_flow_ops.cond( all_finished, lambda: base_next_inputs, maybe_sample) return (finished, next_inputs, state) class ScheduledOutputTrainingHelper(TrainingHelper): """A training helper that adds scheduled sampling directly to outputs. Returns False for sample_ids where no sampling took place; True elsewhere. """ def __init__(self, inputs, sequence_length, sampling_probability, time_major=False, seed=None, next_inputs_fn=None, auxiliary_inputs=None, name=None): """Initializer. Args: inputs: A (structure) of input tensors. sequence_length: An int32 vector tensor. sampling_probability: A 0D `float32` tensor: the probability of sampling from the outputs instead of reading directly from the inputs. time_major: Python bool. Whether the tensors in `inputs` are time major. If `False` (default), they are assumed to be batch major. seed: The sampling seed. next_inputs_fn: (Optional) callable to apply to the RNN outputs to create the next input when sampling. If `None` (default), the RNN outputs will be used as the next inputs. 
auxiliary_inputs: An optional (structure of) auxiliary input tensors with a shape that matches `inputs` in all but (potentially) the final dimension. These tensors will be concatenated to the sampled output or the `inputs` when not sampling for use as the next input. name: Name scope for any created operations. Raises: ValueError: if `sampling_probability` is not a scalar or vector. """ with ops.name_scope(name, "ScheduledOutputTrainingHelper", [inputs, auxiliary_inputs, sampling_probability]): self._sampling_probability = ops.convert_to_tensor( sampling_probability, name="sampling_probability") if self._sampling_probability.get_shape().ndims not in (0, 1): raise ValueError( "sampling_probability must be either a scalar or a vector. " "saw shape: %s" % (self._sampling_probability.get_shape())) if auxiliary_inputs is None: maybe_concatenated_inputs = inputs else: inputs = ops.convert_to_tensor(inputs, name="inputs") auxiliary_inputs = ops.convert_to_tensor( auxiliary_inputs, name="auxiliary_inputs") maybe_concatenated_inputs = nest.map_structure( lambda x, y: array_ops.concat((x, y), -1), inputs, auxiliary_inputs) if not time_major: auxiliary_inputs = nest.map_structure( _transpose_batch_time, auxiliary_inputs) self._auxiliary_input_tas = ( nest.map_structure(_unstack_ta, auxiliary_inputs) if auxiliary_inputs is not None else None) self._seed = seed self._next_inputs_fn = next_inputs_fn super(ScheduledOutputTrainingHelper, self).__init__( inputs=maybe_concatenated_inputs, sequence_length=sequence_length, time_major=time_major, name=name) def initialize(self, name=None): return super(ScheduledOutputTrainingHelper, self).initialize(name=name) def sample(self, time, outputs, state, name=None): with ops.name_scope(name, "ScheduledOutputTrainingHelperSample", [time, outputs, state]): return bernoulli_sample( probs=self._sampling_probability, sample_shape=self.batch_size, seed=self._seed) def next_inputs(self, time, outputs, state, sample_ids, name=None): with ops.name_scope(name, "ScheduledOutputTrainingHelperNextInputs", [time, outputs, state, sample_ids]): (finished, base_next_inputs, state) = ( super(ScheduledOutputTrainingHelper, self).next_inputs( time=time, outputs=outputs, state=state, sample_ids=sample_ids, name=name)) sample_ids = math_ops.cast(sample_ids, dtypes.bool) def maybe_sample(): """Perform scheduled sampling.""" def maybe_concatenate_auxiliary_inputs(outputs_, indices=None): """Concatenate outputs with auxiliary inputs, if they exist.""" if self._auxiliary_input_tas is None: return outputs_ next_time = time + 1 auxiliary_inputs = nest.map_structure( lambda ta: ta.read(next_time), self._auxiliary_input_tas) if indices is not None: auxiliary_inputs = array_ops.gather_nd(auxiliary_inputs, indices) return nest.map_structure( lambda x, y: array_ops.concat((x, y), -1), outputs_, auxiliary_inputs) if self._next_inputs_fn is None: return array_ops.where( sample_ids, maybe_concatenate_auxiliary_inputs(outputs), base_next_inputs) where_sampling = math_ops.cast( array_ops.where(sample_ids), dtypes.int32) where_not_sampling = math_ops.cast( array_ops.where(math_ops.logical_not(sample_ids)), dtypes.int32) outputs_sampling = array_ops.gather_nd(outputs, where_sampling) inputs_not_sampling = array_ops.gather_nd(base_next_inputs, where_not_sampling) sampled_next_inputs = maybe_concatenate_auxiliary_inputs( self._next_inputs_fn(outputs_sampling), where_sampling) base_shape = array_ops.shape(base_next_inputs) return (array_ops.scatter_nd(indices=where_sampling, updates=sampled_next_inputs, 
shape=base_shape) + array_ops.scatter_nd(indices=where_not_sampling, updates=inputs_not_sampling, shape=base_shape)) all_finished = math_ops.reduce_all(finished) no_samples = math_ops.logical_not(math_ops.reduce_any(sample_ids)) next_inputs = control_flow_ops.cond( math_ops.logical_or(all_finished, no_samples), lambda: base_next_inputs, maybe_sample) return (finished, next_inputs, state) class GreedyEmbeddingHelper(Helper): """A helper for use during inference. Uses the argmax of the output (treated as logits) and passes the result through an embedding layer to get the next input. """ def __init__(self, embedding, start_tokens, end_token): """Initializer. Args: embedding: A callable that takes a vector tensor of `ids` (argmax ids), or the `params` argument for `embedding_lookup`. The returned tensor will be passed to the decoder input. start_tokens: `int32` vector shaped `[batch_size]`, the start tokens. end_token: `int32` scalar, the token that marks end of decoding. Raises: ValueError: if `start_tokens` is not a 1D tensor or `end_token` is not a scalar. """ if callable(embedding): self._embedding_fn = embedding else: self._embedding_fn = ( lambda ids: embedding_ops.embedding_lookup(embedding, ids)) self._start_tokens = ops.convert_to_tensor( start_tokens, dtype=dtypes.int32, name="start_tokens") self._end_token = ops.convert_to_tensor( end_token, dtype=dtypes.int32, name="end_token") if self._start_tokens.get_shape().ndims != 1: raise ValueError("start_tokens must be a vector") self._batch_size = array_ops.size(start_tokens) if self._end_token.get_shape().ndims != 0: raise ValueError("end_token must be a scalar") self._start_inputs = self._embedding_fn(self._start_tokens) @property def batch_size(self): return self._batch_size @property def sample_ids_shape(self): return tensor_shape.TensorShape([]) @property def sample_ids_dtype(self): return dtypes.int32 def initialize(self, name=None): finished = array_ops.tile([False], [self._batch_size]) return (finished, self._start_inputs) def sample(self, time, outputs, state, name=None): """sample for GreedyEmbeddingHelper.""" del time, state # unused by sample_fn # Outputs are logits, use argmax to get the most probable id if not isinstance(outputs, ops.Tensor): raise TypeError("Expected outputs to be a single Tensor, got: %s" % type(outputs)) sample_ids = math_ops.argmax(outputs, axis=-1, output_type=dtypes.int32) return sample_ids def next_inputs(self, time, outputs, state, sample_ids, name=None): """next_inputs_fn for GreedyEmbeddingHelper.""" del time, outputs # unused by next_inputs_fn finished = math_ops.equal(sample_ids, self._end_token) all_finished = math_ops.reduce_all(finished) next_inputs = control_flow_ops.cond( all_finished, # If we're finished, the next_inputs value doesn't matter lambda: self._start_inputs, lambda: self._embedding_fn(sample_ids)) return (finished, next_inputs, state) class SampleEmbeddingHelper(GreedyEmbeddingHelper): """A helper for use during inference. Uses sampling (from a distribution) instead of argmax and passes the result through an embedding layer to get the next input. """ def __init__(self, embedding, start_tokens, end_token, softmax_temperature=None, seed=None): """Initializer. Args: embedding: A callable that takes a vector tensor of `ids` (argmax ids), or the `params` argument for `embedding_lookup`. The returned tensor will be passed to the decoder input. start_tokens: `int32` vector shaped `[batch_size]`, the start tokens. end_token: `int32` scalar, the token that marks end of decoding. 
softmax_temperature: (Optional) `float32` scalar, value to divide the logits by before computing the softmax. Larger values (above 1.0) result in more random samples, while smaller values push the sampling distribution towards the argmax. Must be strictly greater than 0. Defaults to 1.0. seed: (Optional) The sampling seed. Raises: ValueError: if `start_tokens` is not a 1D tensor or `end_token` is not a scalar. """ super(SampleEmbeddingHelper, self).__init__( embedding, start_tokens, end_token) self._softmax_temperature = softmax_temperature self._seed = seed def sample(self, time, outputs, state, name=None): """sample for SampleEmbeddingHelper.""" del time, state # unused by sample_fn # Outputs are logits, we sample instead of argmax (greedy). if not isinstance(outputs, ops.Tensor): raise TypeError("Expected outputs to be a single Tensor, got: %s" % type(outputs)) if self._softmax_temperature is None: logits = outputs else: logits = outputs / self._softmax_temperature sample_ids = categorical_sample(logits=logits, seed=self._seed) return sample_ids class InferenceHelper(Helper): """A helper to use during inference with a custom sampling function.""" def __init__(self, sample_fn, sample_shape, sample_dtype, start_inputs, end_fn, next_inputs_fn=None): """Initializer. Args: sample_fn: A callable that takes `outputs` and emits tensor `sample_ids`. sample_shape: Either a list of integers, or a 1-D Tensor of type `int32`, the shape of the each sample in the batch returned by `sample_fn`. sample_dtype: the dtype of the sample returned by `sample_fn`. start_inputs: The initial batch of inputs. end_fn: A callable that takes `sample_ids` and emits a `bool` vector shaped `[batch_size]` indicating whether each sample is an end token. next_inputs_fn: (Optional) A callable that takes `sample_ids` and returns the next batch of inputs. If not provided, `sample_ids` is used as the next batch of inputs. """ self._sample_fn = sample_fn self._end_fn = end_fn self._sample_shape = tensor_shape.TensorShape(sample_shape) self._sample_dtype = sample_dtype self._next_inputs_fn = next_inputs_fn self._batch_size = array_ops.shape(start_inputs)[0] self._start_inputs = ops.convert_to_tensor( start_inputs, name="start_inputs") @property def batch_size(self): return self._batch_size @property def sample_ids_shape(self): return self._sample_shape @property def sample_ids_dtype(self): return self._sample_dtype def initialize(self, name=None): finished = array_ops.tile([False], [self._batch_size]) return (finished, self._start_inputs) def sample(self, time, outputs, state, name=None): del time, state # unused by sample return self._sample_fn(outputs) def next_inputs(self, time, outputs, state, sample_ids, name=None): del time, outputs # unused by next_inputs if self._next_inputs_fn is None: next_inputs = sample_ids else: next_inputs = self._next_inputs_fn(sample_ids) finished = self._end_fn(sample_ids) return (finished, next_inputs, state)
apache-2.0
2,127,528,187,332,910,300
37.716033
89
0.653939
false
jasondunsmore/heat
heat/tests/test_common_service_utils.py
7
3171
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import datetime
from oslo_utils import timeutils
import uuid

from heat.common import service_utils
from heat.db.sqlalchemy import models
from heat.tests import common


class TestServiceUtils(common.HeatTestCase):

    def test_status_check(self):
        service = models.Service()
        service.id = str(uuid.uuid4())
        service.engine_id = str(uuid.uuid4())
        service.binary = 'heat-engine'
        service.hostname = 'host.devstack.org'
        service.host = 'engine-1'
        service.report_interval = 60
        service.topic = 'engine'
        service.created_at = timeutils.utcnow()
        service.deleted_at = None
        service.updated_at = None

        service_dict = service_utils.format_service(service)
        self.assertEqual(service_dict['id'], service.id)
        self.assertEqual(service_dict['engine_id'], service.engine_id)
        self.assertEqual(service_dict['host'], service.host)
        self.assertEqual(service_dict['hostname'], service.hostname)
        self.assertEqual(service_dict['binary'], service.binary)
        self.assertEqual(service_dict['topic'], service.topic)
        self.assertEqual(service_dict['report_interval'],
                         service.report_interval)
        self.assertEqual(service_dict['created_at'], service.created_at)
        self.assertEqual(service_dict['updated_at'], service.updated_at)
        self.assertEqual(service_dict['deleted_at'], service.deleted_at)
        self.assertEqual(service_dict['status'], 'up')

        # check again within first report_interval time (60)
        service_dict = service_utils.format_service(service)
        self.assertEqual(service_dict['status'], 'up')

        # check update not happen within report_interval time (60+)
        service.created_at = (timeutils.utcnow() -
                              datetime.timedelta(0, 70))
        service_dict = service_utils.format_service(service)
        self.assertEqual(service_dict['status'], 'down')

        # check update happened after report_interval time (60+)
        service.updated_at = (timeutils.utcnow() -
                              datetime.timedelta(0, 70))
        service_dict = service_utils.format_service(service)
        self.assertEqual(service_dict['status'], 'down')

        # check update happened within report_interval time (60)
        service.updated_at = (timeutils.utcnow() -
                              datetime.timedelta(0, 50))
        service_dict = service_utils.format_service(service)
        self.assertEqual(service_dict['status'], 'up')
apache-2.0
8,060,401,556,529,435,000
41.851351
72
0.666351
false
jacquerie/inspire-dojson
inspire_dojson/data/__init__.py
2
1085
# -*- coding: utf-8 -*-
#
# This file is part of INSPIRE.
# Copyright (C) 2014-2017 CERN.
#
# INSPIRE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# INSPIRE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with INSPIRE. If not, see <http://www.gnu.org/licenses/>.
#
# In applying this license, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization
# or submit itself to any jurisdiction.

"""DoJSON model and rules for Data."""

from __future__ import absolute_import, division, print_function

from . import rules  # noqa: F401
from .model import data  # noqa: F401
gpl-3.0
7,282,977,334,080,461,000
37.75
77
0.749309
false
BigDataforYou/movie_recommendation_workshop_1
big_data_4_you_demo_1/venv/lib/python2.7/site-packages/pandas/compat/numpy/__init__.py
1
2259
""" support numpy compatiblitiy across versions """

import re
import numpy as np
from distutils.version import LooseVersion
from pandas.compat import string_types, string_and_binary_types

# turn off all numpy warnings
np.seterr(all='ignore')

# numpy versioning
_np_version = np.version.short_version
_nlv = LooseVersion(_np_version)
_np_version_under1p8 = _nlv < '1.8'
_np_version_under1p9 = _nlv < '1.9'
_np_version_under1p10 = _nlv < '1.10'
_np_version_under1p11 = _nlv < '1.11'
_np_version_under1p12 = _nlv < '1.12'

if LooseVersion(_np_version) < '1.7.0':
    raise ImportError('this version of pandas is incompatible with '
                      'numpy < 1.7.0\n'
                      'your numpy version is {0}.\n'
                      'Please upgrade numpy to >= 1.7.0 to use '
                      'this pandas version'.format(_np_version))


_tz_regex = re.compile('[+-]0000$')


def tz_replacer(s):
    if isinstance(s, string_types):
        if s.endswith('Z'):
            s = s[:-1]
        elif _tz_regex.search(s):
            s = s[:-5]
    return s


def np_datetime64_compat(s, *args, **kwargs):
    """
    provide compat for construction of strings to numpy datetime64's with
    tz-changes in 1.11 that make '2015-01-01 09:00:00Z' show a deprecation
    warning, when need to pass '2015-01-01 09:00:00'
    """

    if not _np_version_under1p11:
        s = tz_replacer(s)
    return np.datetime64(s, *args, **kwargs)


def np_array_datetime64_compat(arr, *args, **kwargs):
    """
    provide compat for construction of an array of strings to a
    np.array(..., dtype=np.datetime64(..))
    tz-changes in 1.11 that make '2015-01-01 09:00:00Z' show a deprecation
    warning, when need to pass '2015-01-01 09:00:00'
    """

    if not _np_version_under1p11:
        # is_list_like
        if hasattr(arr, '__iter__') and not \
                isinstance(arr, string_and_binary_types):
            arr = [tz_replacer(s) for s in arr]
        else:
            arr = tz_replacer(arr)

    return np.array(arr, *args, **kwargs)


__all__ = ['np',
           '_np_version_under1p8',
           '_np_version_under1p9',
           '_np_version_under1p10',
           '_np_version_under1p11',
           '_np_version_under1p12',
           ]
mit
-35,845,655,230,070,376
28.337662
74
0.593183
false
s-garbuzov/pybvc
samples/samplenetconf/demos/ctrl_demo10.py
4
9051
#!/usr/bin/python # Copyright (c) 2015, BROCADE COMMUNICATIONS SYSTEMS, INC # All rights reserved. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from this # software without specific prior written permission. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF # THE POSSIBILITY OF SUCH DAMAGE. """ @authors: Sergei Garbuzov @status: Development @version: 1.1.0 """ import time from pybvc.controller.controller import Controller from pybvc.common.status import STATUS from pybvc.controller.netconfnode import NetconfNode from pybvc.common.utils import load_dict_from_file def nc_demo_10(): f = "cfg2.yml" d = {} if(load_dict_from_file(f, d) is False): print("Config file '%s' read error: " % f) exit() try: ctrlIpAddr = d['ctrlIpAddr'] ctrlPortNum = d['ctrlPortNum'] ctrlUname = d['ctrlUname'] ctrlPswd = d['ctrlPswd'] nodeName = d['nodeName'] nodeIpAddr = d['nodeIpAddr'] nodePortNum = d['nodePortNum'] nodeUname = d['nodeUname'] nodePswd = d['nodePswd'] rundelay = d['rundelay'] except: print ("Failed to get Controller or NETCONF device attributes") exit(0) print ("<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<") print ("<<< Demo Start") print ("<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<") print ("\n") print ("<<< Creating Controller instance") ctrl = Controller(ctrlIpAddr, ctrlPortNum, ctrlUname, ctrlPswd) print ("'Controller':") print ctrl.to_json() print "\n" print ("<<< Show NETCONF nodes configured on the Controller") time.sleep(rundelay) result = ctrl.get_netconf_nodes_in_config() status = result.get_status() if(status.eq(STATUS.OK)): print "Nodes configured:" nlist = result.get_data() for item in nlist: print " '{}'".format(item) else: print ("\n") print ("!!!Demo terminated, reason: %s" % status.brief()) exit(0) node_configured = False result = ctrl.check_node_config_status(nodeName) status = result.get_status() if(status.eq(STATUS.NODE_CONFIGURED)): node_configured = True elif(status.eq(STATUS.DATA_NOT_FOUND)): node_configured = False else: print ("\n") print "Failed to get configuration status for the '%s'" % nodeName print ("!!!Demo terminated, reason: %s" % status.detailed()) exit(0) if node_configured: print ("\n") print ("<<< '%s' is already configured on the Controller" % nodeName) print ("Unmounting '%s' from the 
Controller" % nodeName) time.sleep(rundelay) result = ctrl.delete_netconf_node(nodename=nodeName) status = result.get_status() if(status.eq(STATUS.OK)): print ("<<< '%s' NETCONF node was successfully removed " "from the Controller" % nodeName) else: print ("\n") print ("!!!Demo terminated, reason: %s" % status.brief()) exit(0) print ("\n") time.sleep(rundelay) print ("<<< Creating new '%s' NETCONF node" % nodeName) node = NetconfNode(ctrl, nodeName, nodeIpAddr, nodePortNum, nodeUname, nodePswd) print ("'%s':" % nodeName) print node.to_json() print ("\n") print ("<<< Add '%s' NETCONF node to the Controller" % nodeName) time.sleep(rundelay) result = ctrl.add_netconf_node(node) status = result.get_status() if(status.eq(STATUS.OK)): print ("'%s' NETCONF node was successfully added " "to the Controller" % nodeName) else: print ("\n") print ("!!!Demo terminated, reason: %s" % status.brief()) print status.detailed() exit(0) print "\n" print ("<<< Show NETCONF nodes configured on the Controller") time.sleep(rundelay) result = ctrl.get_netconf_nodes_in_config() status = result.get_status() if(status.eq(STATUS.OK)): print "Nodes configured:" nlist = result.get_data() for item in nlist: print " '{}'".format(item) else: print ("\n") print ("!!!Demo terminated, reason: %s" % status.brief()) exit(0) print "\n" print ("<<< Find the '%s' NETCONF node on the Controller" % nodeName) time.sleep(rundelay) result = ctrl.check_node_config_status(nodeName) status = result.get_status() if(status.eq(STATUS.NODE_CONFIGURED)): print ("'%s' node is configured" % nodeName) else: print ("\n") print ("!!!Demo terminated, reason: %s" % status.brief()) exit(0) print "\n" print ("<<< Show connection status for all NETCONF nodes " "configured on the Controller") time.sleep(rundelay) result = ctrl.get_netconf_nodes_conn_status() status = result.get_status() if(status.eq(STATUS.OK)): print "Nodes connection status:" nlist = result.get_data() for item in nlist: status = "" if (item['connected'] is True): status = "connected" else: status = "not connected" print " '{}' is {}".format(item['node'], status) else: print ("\n") print ("!!!Demo terminated, reason: %s" % status.brief()) exit(0) print "\n" print ("<<< Show connection status for the '%s' NETCONF node" % nodeName) time.sleep(rundelay) result = ctrl.check_node_conn_status(nodeName) status = result.get_status() if(status.eq(STATUS.NODE_CONNECTED)): print ("'%s' node is connected" % nodeName) elif (status.eq(STATUS.NODE_DISONNECTED)): print ("'%s' node is not connected" % nodeName) elif (status.eq(STATUS.NODE_NOT_FOUND)): print ("'%s' node is not found" % nodeName) else: print ("\n") print ("!!!Demo terminated, reason: %s" % status.brief()) exit(0) print "\n" print (">>> Remove '%s' NETCONF node from the Controller" % nodeName) time.sleep(rundelay) result = ctrl.delete_netconf_node(node) status = result.get_status() if(status.eq(STATUS.OK)): print ("'%s' NETCONF node was successfully removed " "from the Controller" % nodeName) else: print ("\n") print ("!!!Demo terminated, reason: %s" % status.brief()) exit(0) print "\n" print ("<<< Show NETCONF nodes configured on the Controller") time.sleep(rundelay) result = ctrl.get_netconf_nodes_in_config() status = result.get_status() if(status.eq(STATUS.OK)): print "Nodes configured:" nlist = result.get_data() for item in nlist: print " '{}'".format(item) else: print ("\n") print ("!!!Demo terminated, reason: %s" % status.brief()) exit(0) print "\n" print ("<<< Show connection status for the '%s' NETCONF node" % nodeName) 
time.sleep(rundelay) result = ctrl.check_node_conn_status(nodeName) status = result.get_status() if (status.eq(STATUS.NODE_CONNECTED)): print ("'%s' node is connected" % nodeName) elif (status.eq(STATUS.NODE_DISONNECTED)): print ("'%s' node is not connected" % nodeName) elif (status.eq(STATUS.NODE_NOT_FOUND)): print ("'%s' node is not found" % nodeName) else: print ("\n") print ("!!!Demo terminated, reason: %s" % status.brief()) exit(0) print ("\n") print (">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>") print (">>> Demo End") print (">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>") if __name__ == "__main__": nc_demo_10()
bsd-3-clause
-5,867,213,145,645,777,000
33.284091
78
0.595846
false
hiryou/pandora_extractor
src/app/Welcome.py
1
1605
from __init__ import *

class Welcome:

    @staticmethod
    def disclaimer():
        print
        print "#################################################################################"
        print "# Welcome to PandoraExtractor, a free software which helps you download your   #"
        print "# favorite music from Pandora to your local computer under mp3 files.          #"
        print "# ---------------------------------------------------------------------------- #"
        print "# Despite the name 'PandoraExtractor', this program does not stream any song   #"
        print "# from Pandora, nor does Pandora have such mp3 storage for their digital music. #"
        print "# PandoraExtractor simply reads the list of thumbed-up tracks in your account  #"
        print "# and download them from other free mp3 music provider websites.               #"
        print "# ---------------------------------------------------------------------------- #"
        print "# The program is restrictedly desgined for personal usage. If you intend to    #"
        print "# use it for other benefits, please take your own risks!                       #"
        print "# ---------------------------------------------------------------------------- #"
        print "# Made by Long Nguyen <[email protected]>                                  #"
        print "#################################################################################"
        print
        try:
            raw_input("Press any key to continue...")
        except:
            print
            sys.exit(1)
mit
4,235,529,976,819,997,000
60.769231
98
0.420561
false
olivierverdier/sfepy
examples/large_deformation/active_fibres.py
1
4761
# -*- coding: utf-8 -*- import numpy as nm from sfepy import data_dir filename_mesh = data_dir + '/meshes/3d/cylinder.mesh' vf_matrix = 0.5 vf_fibres1 = 0.2 vf_fibres2 = 0.3 options = { 'nls' : 'newton', 'ls' : 'ls', 'ts' : 'ts', 'save_steps' : -1, 'post_process_hook' : 'stress_strain', } fields = { 'displacement': (nm.float64, 3, 'Omega', 1), } materials = { 'solid' : ({ 'K' : vf_matrix * 1e3, # bulk modulus 'mu' : vf_matrix * 20e0, # shear modulus of neoHookean term },), 'f1' : 'get_pars_fibres1', 'f2' : 'get_pars_fibres2', } def get_pars_fibres(ts, coors, mode=None, which=0, vf=1.0): """ Parameters ---------- ts : TimeStepper Time stepping info. coors : array_like The physical domain coordinates where the parameters shound be defined. mode : 'qp' or 'special' Call mode. which : int Fibre system id. vf : float Fibre system volume fraction. """ if mode != 'qp': return fmax = 10.0 eps_opt = 0.01 s = 1.0 tt = ts.nt * 2.0 * nm.pi if which == 0: # system 1 fdir = nm.array([1.0, 0.0, 0.0], dtype=nm.float64) act = 0.5 * (1.0 + nm.sin(tt - (0.5 * nm.pi))) elif which == 1: # system 2 fdir = nm.array([0.0, 1.0, 0.0], dtype=nm.float64) act = 0.5 * (1.0 + nm.sin(tt + (0.5 * nm.pi))) else: raise ValueError('unknown fibre system! (%d)' % which) fdir.shape = (3, 1) fdir /= nm.linalg.norm(fdir) print act shape = (coors.shape[0], 1, 1) out = { 'fmax' : vf * nm.tile(fmax, shape), 'eps_opt' : nm.tile(eps_opt, shape), 's' : nm.tile(s, shape), 'fdir' : nm.tile(fdir, shape), 'act' : nm.tile(act, shape), } return out functions = { 'get_pars_fibres1' : (lambda ts, coors, mode=None, region=None, ig=None: get_pars_fibres(ts, coors, mode=mode, which=0, vf=vf_fibres1),), 'get_pars_fibres2' : (lambda ts, coors, mode=None, region=None, ig=None: get_pars_fibres(ts, coors, mode=mode, which=1, vf=vf_fibres2),), } variables = { 'u' : ('unknown field', 'displacement', 0), 'v' : ('test field', 'displacement', 'u'), } regions = { 'Omega' : ('all', {}), 'Left' : ('nodes in (x < 0.001)', {}), 'Right' : ('nodes in (x > 0.099)', {}), } ## # Dirichlet BC. ebcs = { 'l' : ('Left', {'u.all' : 0.0}), } ## # Balance of forces. integral_1 = { 'name' : 'i1', 'kind' : 'v', 'quadrature' : 'gauss_o1_d3', } equations = { 'balance' : """dw_tl_he_neohook.i1.Omega( solid.mu, v, u ) + dw_tl_bulk_penalty.i1.Omega( solid.K, v, u ) + dw_tl_fib_a.i1.Omega( f1.fmax, f1.eps_opt, f1.s, f1.fdir, f1.act, v, u ) + dw_tl_fib_a.i1.Omega( f2.fmax, f2.eps_opt, f2.s, f2.fdir, f2.act, v, u ) = 0""", } def stress_strain(out, problem, state, extend=False): from sfepy.base.base import Struct, debug ev = problem.evaluate strain = ev('dw_tl_he_neohook.i1.Omega( solid.mu, v, u )', mode='el_avg', term_mode='strain') out['green_strain'] = Struct(name='output_data', mode='cell', data=strain, dofs=None) stress = ev('dw_tl_he_neohook.i1.Omega( solid.mu, v, u )', mode='el_avg', term_mode='stress') out['neohook_stress'] = Struct(name='output_data', mode='cell', data=stress, dofs=None ) stress = ev('dw_tl_bulk_penalty.i1.Omega( solid.K, v, u )', mode='el_avg', term_mode= 'stress') out['bulk_stress'] = Struct(name='output_data', mode='cell', data=stress, dofs=None) return out ## # Solvers etc. solver_0 = { 'name' : 'ls', 'kind' : 'ls.scipy_direct', } solver_1 = { 'name' : 'newton', 'kind' : 'nls.newton', 'i_max' : 7, 'eps_a' : 1e-10, 'eps_r' : 1.0, 'macheps' : 1e-16, 'lin_red' : 1e-2, # Linear system error < (eps_a * lin_red). 
'ls_red' : 0.1, 'ls_red_warp': 0.001, 'ls_on' : 1.1, 'ls_min' : 1e-5, 'check' : 0, 'delta' : 1e-6, 'is_plot' : False, 'problem' : 'nonlinear', # 'nonlinear' or 'linear' (ignore i_max) } solver_2 = { 'name' : 'ts', 'kind' : 'ts.simple', 't0' : 0, 't1' : 1, 'dt' : None, 'n_step' : 21, # has precedence over dt! } ## # FE assembling parameters. fe = { 'chunk_size' : 100000, 'cache_override' : False, }
bsd-3-clause
8,988,453,457,501,024,000
24.057895
79
0.485822
false
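The get_pars_fibres() function in the record above drives the two fibre systems with act = 0.5 * (1 + sin(tt -/+ pi/2)), tt = ts.nt * 2 * pi, so the systems contract in antiphase over the simulated cycle. The following is a minimal NumPy-only sketch of that arithmetic; the 21-step normalized time axis is an assumption standing in for sfepy's TimeStepper (ts.nt), and no sfepy code is involved.

# Minimal sketch (NumPy only) of the activation curves computed in
# get_pars_fibres() above; the normalized time array replaces ts.nt.
import numpy as nm

nt = nm.linspace(0.0, 1.0, 21)          # assumed normalized time over 21 steps
tt = nt * 2.0 * nm.pi

act1 = 0.5 * (1.0 + nm.sin(tt - 0.5 * nm.pi))   # fibre system 1 (x direction)
act2 = 0.5 * (1.0 + nm.sin(tt + 0.5 * nm.pi))   # fibre system 2 (y direction)

for t, a1, a2 in zip(nt, act1, act2):
    print('t=%.2f  act1=%.3f  act2=%.3f' % (t, a1, a2))
# act1 rises from 0 to 1 over the first half cycle while act2 falls from
# 1 to 0, i.e. the two fibre systems are activated in antiphase.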
petricm/DIRAC
DataManagementSystem/Service/DataIntegrityHandler.py
9
6693
""" :mod: DataIntegrityHandler .. module: DataIntegrityHandler :synopsis: DataIntegrityHandler is the implementation of the Data Integrity service in the DISET framework """ ## imports from types import DictType, IntType, LongType, ListType, StringTypes ## from DIRAC from DIRAC.Core.DISET.RequestHandler import RequestHandler from DIRAC import gLogger, S_OK from DIRAC.DataManagementSystem.DB.DataIntegrityDB import DataIntegrityDB __RCSID__ = "$Id$" # This is a global instance of the DataIntegrityDB class gDataIntegrityDB = False def initializeDataIntegrityHandler( serviceInfo ): """ Check that we can connect to the DB and that the tables are properly created or updated """ global gDataIntegrityDB gDataIntegrityDB = DataIntegrityDB() res = gDataIntegrityDB._connect() if not res['OK']: return res res = gDataIntegrityDB._checkTable() if not res['OK'] and not res['Message'] == 'The requested table already exist': return res return S_OK() class DataIntegrityHandler( RequestHandler ): """ .. class:: DataIntegrityHandler Implementation of the Data Integrity service in the DISET framework. """ types_removeProblematic = [ [IntType, LongType, ListType] ] @staticmethod def export_removeProblematic( fileID ): """ Remove the file with the supplied FileID from the database """ if type( fileID ) == ListType: fileIDs = fileID else: fileIDs = [int( fileID )] gLogger.info( "DataIntegrityHandler.removeProblematic: Attempting to remove problematic." ) res = gDataIntegrityDB.removeProblematic( fileIDs ) if not res['OK']: gLogger.error( "DataIntegrityHandler.removeProblematic: Failed to remove problematic.", res['Message'] ) return res types_getProblematic = [] @staticmethod def export_getProblematic(): """ Get the next problematic to resolve from the IntegrityDB """ gLogger.info( "DataIntegrityHandler.getProblematic: Getting file to resolve." ) res = gDataIntegrityDB.getProblematic() if not res['OK']: gLogger.error( "DataIntegrityHandler.getProblematic: Failed to get problematic file to resolve.", res['Message'] ) return res types_getPrognosisProblematics = [StringTypes] @staticmethod def export_getPrognosisProblematics( prognosis ): """ Get problematic files from the problematics table of the IntegrityDB """ gLogger.info( "DataIntegrityHandler.getPrognosisProblematics: Getting files with %s prognosis." % prognosis ) res = gDataIntegrityDB.getPrognosisProblematics( prognosis ) if not res['OK']: gLogger.error( "DataIntegrityHandler.getPrognosisProblematics: Failed to get prognosis files.", res['Message'] ) return res types_setProblematicStatus = [[IntType, LongType], StringTypes] @staticmethod def export_setProblematicStatus( fileID, status ): """ Update the status of the problematics with the provided fileID """ gLogger.info( "DataIntegrityHandler.setProblematicStatus: Setting file %s status to %s." % ( fileID, status ) ) res = gDataIntegrityDB.setProblematicStatus( fileID, status ) if not res['OK']: gLogger.error( "DataIntegrityHandler.setProblematicStatus: Failed to set status.", res['Message'] ) return res types_incrementProblematicRetry = [[IntType, LongType]] @staticmethod def export_incrementProblematicRetry( fileID ): """ Update the retry count for supplied file ID. """ gLogger.info( "DataIntegrityHandler.incrementProblematicRetry: Incrementing retries for file %s." 
% ( fileID ) ) res = gDataIntegrityDB.incrementProblematicRetry( fileID ) if not res['OK']: gLogger.error( "DataIntegrityHandler.incrementProblematicRetry: Failed to increment retries.", res['Message'] ) return res types_insertProblematic = [StringTypes, DictType] @staticmethod def export_insertProblematic( source, fileMetadata ): """ Insert problematic files into the problematics table of the IntegrityDB """ gLogger.info( "DataIntegrityHandler.insertProblematic: Inserting problematic file to integrity DB." ) res = gDataIntegrityDB.insertProblematic( source, fileMetadata ) if not res['OK']: gLogger.error( "DataIntegrityHandler.insertProblematic: Failed to insert.", res['Message'] ) return res types_changeProblematicPrognosis = [] @staticmethod def export_changeProblematicPrognosis( fileID, newPrognosis ): """ Change the prognosis for the supplied file """ gLogger.info( "DataIntegrityHandler.changeProblematicPrognosis: Changing problematic prognosis." ) res = gDataIntegrityDB.changeProblematicPrognosis( fileID, newPrognosis ) if not res['OK']: gLogger.error( "DataIntegrityHandler.changeProblematicPrognosis: Failed to update.", res['Message'] ) return res types_getTransformationProblematics = [ [IntType, LongType] ] @staticmethod def export_getTransformationProblematics( transID ): """ Get the problematics for a given transformation """ gLogger.info( "DataIntegrityHandler.getTransformationProblematics: Getting problematics for transformation." ) res = gDataIntegrityDB.getTransformationProblematics( transID ) if not res['OK']: gLogger.error( "DataIntegrityHandler.getTransformationProblematics: Failed.", res['Message'] ) return res types_getProblematicsSummary = [] @staticmethod def export_getProblematicsSummary(): """ Get a summary from the Problematics table from the IntegrityDB """ gLogger.info( "DataIntegrityHandler.getProblematicsSummary: Getting problematics summary." ) res = gDataIntegrityDB.getProblematicsSummary() if res['OK']: for prognosis, statusDict in res['Value'].items(): gLogger.info( "DataIntegrityHandler.getProblematicsSummary: %s." % prognosis ) for status, count in statusDict.items(): gLogger.info( "DataIntegrityHandler.getProblematicsSummary: \t%-10s %-10s." % ( status, str( count ) ) ) else: gLogger.error( "DataIntegrityHandler.getProblematicsSummary: Failed to get summary.", res['Message'] ) return res types_getDistinctPrognosis = [] @staticmethod def export_getDistinctPrognosis(): """ Get a list of the distinct prognosis from the IntegrityDB """ gLogger.info( "DataIntegrityHandler.getDistinctPrognosis: Getting distinct prognosis." ) res = gDataIntegrityDB.getDistinctPrognosis() if res['OK']: for prognosis in res['Value']: gLogger.info( "DataIntegrityHandler.getDistinctPrognosis: \t%s." % prognosis ) else: gLogger.error( "DataIntegrityHandler.getDistinctPrognosis: Failed to get unique prognosis.", res['Message'] ) return res
gpl-3.0
6,767,732,409,155,483,000
40.06135
120
0.732706
false
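Every exported method in the handler above funnels a DataIntegrityDB call through the same result-dictionary convention: check res['OK'], log res['Message'] on failure, otherwise use res['Value']. The sketch below imitates that convention so the pattern can be run without DIRAC installed; S_OK and S_ERROR here are hand-rolled stand-ins for DIRAC's real helpers, and removeProblematic is a fake database call used only to show the flow.

# Standalone sketch of the result-dictionary pattern the handler relies on.
def S_OK(value=None):
    return {'OK': True, 'Value': value}

def S_ERROR(message=''):
    return {'OK': False, 'Message': message}

def removeProblematic(fileIDs):
    # pretend DB call; the real handler delegates to DataIntegrityDB
    if not fileIDs:
        return S_ERROR('no file IDs supplied')
    return S_OK(len(fileIDs))

res = removeProblematic([101, 102])
if res['OK']:
    print('removed %d problematic files' % res['Value'])
else:
    print('failed: %s' % res['Message'])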
Jumpscale/jumpscale_core8
lib/JumpScale/data/hash/HashTool.py
1
7832
from JumpScale import j # import ExtraTools import hashlib import binascii try: from pyblake2 import blake2b except: rc, out = j.sal.process.execute("pip3 install pyblake2", die=True, outputToStdout=False, ignoreErrorOutput=False) class HashTool: def __init__(self): self.__jslocation__ = "j.data.hash" def hashDir(self, rootpath): """ walk over all files, calculate md5 and of sorted list also calc md5 this is the resulting hash for the dir independant from time and other metadata (appart from path) """ paths = j.sal.fs.listFilesInDir( rootpath, recursive=True, followSymlinks=False) if paths == []: return "", "" paths2 = [] for path in paths: path2 = path.replace(rootpath, "") if path2[0] == "/": path2 = path2[1:] paths2.append(path2) paths2.sort() out = "" for path2 in paths2: realpath = j.sal.fs.joinPaths(rootpath, path2) if not j.core.platformtype.myplatform.isWindows() or not j.sal.windows.checkFileToIgnore(realpath): # print "realpath %s %s" % (rootpath,path2) hhash = j.data.hash.md5(realpath) out += "%s|%s\n" % (hhash, path2) import hashlib if isinstance(out, str): out = out.encode('utf-8') impl = hashlib.md5(out) return impl.hexdigest(), out def hex2bin(self, hex): """ output of the hash functions are string representation, when you need a smaller representation you can go to binary """ return binascii.unhexlify(hex) def bin2hex(self, bin): """ output of the hash functions are string representation, when you need a smaller representation you can go to binary """ return binascii.hexlify(bin) import zlib def _hash_funcs(alg): '''Function generator for hashlib-compatible hashing implementations''' template_data = {'alg': alg.upper(), } def _string(s): '''Calculate %(alg)s hash of input string @param s: String value to hash @type s: string @returns: %(alg)s hash hex digest of the input value @rtype: string ''' if isinstance(s, str): s = s.encode('utf-8') impl = hashlib.new(alg, s) return impl.hexdigest() # def _bin(s): # '''Calculate %(alg)s hash of input string (can be binary) # # @param s: String value to hash # @type s: string # # @returns: %(alg)s hash digest of the input value # @rtype: bin # ''' # if isinstance(s, str): # s = s.encode('utf-8') # impl = hashlib.new(alg, s) # return impl.digest() # _string.__doc__ = _string.__doc__ % template_data def _fd(fd): '''Calculate %(alg)s hash of content available on an FD Blocks of the blocksize used by the hashing algorithm will be read from the given FD, which should be a file-like object (i.e. it should implement C{read(number)}). @param fd: FD to read @type fd: object @returns: %(alg)s hash hex digest of data available on C{fd} @rtype: string ''' impl = hashlib.new(alg) # We use the blocksize used by the hashing implementation. This will be # fairly small, maybe this should be raised if this ever becomes an # issue blocksize = impl.block_size while True: s = fd.read(blocksize) if not s: break impl.update(s) # Maybe one day this will help the GC del s return impl.hexdigest() # _fd.__doc__ = _fd.__doc__ % template_data def _file(path): '''Calculate %(alg)s hash of data available in a file The file will be opened in read/binary mode and blocks of the blocksize used by the hashing implementation will be read. 
@param path: Path to file to calculate content hash @type path: string @returns: %(alg)s hash hex digest of data available in the given file @rtype: string ''' with open(path, 'rb') as fd: return _fd(fd) # _file.__doc__ = _file.__doc__ % template_data return _string, _fd, _file # CRC32 is not supported by hashlib def crc32(s): '''Calculate CRC32 hash of input string @param s: String value to hash @type s: string @returns: CRC32 hash of the input value @rtype: number ''' return zlib.crc32(s) def crc32_fd(fd): '''Calculate CRC32 hash of content available on an FD Blocks of the blocksize used by the hashing algorithm will be read from the given FD, which should be a file-like object (i.e. it should implement C{read(number)}). @param fd: FD to read @type fd: object @returns: CRC32 hash of data available on C{fd} @rtype: number ''' data = fd.read() value = crc32(data) del data return value def crc32_file(path): '''Calculate CRC32 hash of data available in a file The file will be opened in read/binary mode and blocks of the blocksize used by the hashing implementation will be read. @param path: Path to file to calculate content hash @type path: string @returns: CRC32 hash of data available in the given file @rtype: number ''' with open(path, 'rb') as fd: return crc32_fd(fd) def blake2(s): '''Calculate blake2 hash of input string @param s: String value to hash @type s: string @returns: blake2 hash of the input value @rtype: number ''' if j.data.types.string.check(s): s = s.encode() h = blake2b(s) return h.hexdigest() def blake2_fd(fd): '''Calculate blake2 hash of content available on an FD Blocks of the blocksize used by the hashing algorithm will be read from the given FD, which should be a file-like object (i.e. it should implement C{read(number)}). @param fd: FD to read @type fd: object @returns: blake2 hash of data available on C{fd} @rtype: number ''' data = fd.read() value = blake2(data) del data return value def blake2_file(path): '''Calculate blake2 hash of data available in a file The file will be opened in read/binary mode and blocks of the blocksize used by the hashing implementation will be read. @param path: Path to file to calculate content hash @type path: string @returns: blake2 hash of data available in the given file @rtype: number ''' with open(path, 'rb') as fd: return blake2_fd(fd) # def hashMd5(s): # if isinstance(s, str): # s = s.encode('utf-8') # impl = hashlib.md5(s) # return impl.hexdigest() __all__ = list() # List of all supported algoritms SUPPORTED_ALGORITHMS = ['md5', 'sha1', 'sha256', 'sha512', ] # For every supported algorithm, create the associated hash functions and add # them to the module globals _glob = globals() for _alg in SUPPORTED_ALGORITHMS: _string, _fd, _file = _hash_funcs(_alg) _glob[_alg] = _string _glob['%s_fd' % _alg] = _fd _glob['%s_file' % _alg] = _file __all__.append('%s' % _alg) __all__.append('%s_fd' % _alg) __all__.append('%s_file' % _alg) SUPPORTED_ALGORITHMS.append('crc32') SUPPORTED_ALGORITHMS.append('blake2') __all__.extend(('crc32', 'crc32_fd', 'crc32_file', )) __all__.extend(('blake2', 'blake2_fd', 'blake2_file', )) SUPPORTED_ALGORITHMS = tuple(SUPPORTED_ALGORITHMS) for alg in SUPPORTED_ALGORITHMS: setattr(HashTool, '%s_string' % alg, staticmethod(_glob[alg])) setattr(HashTool, alg, staticmethod(_glob['%s_file' % alg]))
apache-2.0
-5,521,307,720,803,020,000
27.071685
174
0.600485
false
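The _hash_funcs() generator in the record above builds md5/sha1/sha256/sha512 helpers for strings, file descriptors and paths and injects them into the module globals and the HashTool class. A dependency-free sketch of the same pattern, using only hashlib (the JumpScale wiring, blake2 and crc32 parts are left out), could look like this:

# Generate string/fd/file hashers per algorithm and expose them under
# predictable names, mirroring the pattern used by HashTool above.
import hashlib

def make_hashers(alg):
    def from_string(s):
        if isinstance(s, str):
            s = s.encode('utf-8')
        return hashlib.new(alg, s).hexdigest()

    def from_fd(fd):
        impl = hashlib.new(alg)
        # read in blocks of the implementation's block size
        for block in iter(lambda: fd.read(impl.block_size), b''):
            impl.update(block)
        return impl.hexdigest()

    def from_file(path):
        with open(path, 'rb') as fd:
            return from_fd(fd)

    return from_string, from_fd, from_file

hashers = {}
for alg in ('md5', 'sha1', 'sha256', 'sha512'):
    hashers[alg], hashers[alg + '_fd'], hashers[alg + '_file'] = make_hashers(alg)

print(hashers['sha256']('hello world'))   # hex digest of the string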
delicb/PyScripts
src/flat_folder.py
1
3941
#!/usr/bin/env python

__author__ = "Bojan Delic <[email protected]>"
__date__ = "Sep 13, 2010"

import sys
import os
import shutil
from optparse import OptionParser

from utils import get_abs_folder

# TODO: Add an option to exclude file names, extensions, folders etc.

USAGE = "%prog <FOLDER>"

ROOT = ""
options = None
FILE_LIST = []


def get_opt_parser():
    parser = OptionParser(usage=USAGE, version="%prog 0.1")
    parser.add_option("-d", "--delete", dest="delete", action="store_true",
                      default=False, help="Delete folders after flattening")
    parser.add_option("-v", "--verbose", dest="verbose", action="store_true",
                      default=False, help="Print status messages to stdout")
    return parser


# TODO: Replace this with proper logging
def log(msg):
    if options.verbose:
        print(msg)


def walk(visited_folders, dirname, fnames):
    '''Callback for os.path.walk
    '''
    visited_folders.append(dirname)
    move_files(dirname, fnames)


def move_files(dirname, fnames):
    '''Actually moves the files (fnames) from dirname to the ROOT folder.
    ROOT is defined as a parameter when the program starts.
    '''
    for file in fnames:
        dest_file = generate_unique_file_name(file)
        dest = os.path.join(ROOT, dest_file)
        f = os.path.join(dirname, file)
        if os.path.isfile(f):
            log("Moving file %s to %s" % (f, dest))
            shutil.move(f, dest)


def generate_unique_file_name(file):
    '''Generates a unique file name based on the given file name.
    If the given file name is not in FILE_LIST (global variable) it returns
    the same name. If it is in FILE_LIST it appends a number at the end of
    the file name, but before the extension. If there are more than 2 files
    with the same name the number at the end of the name will be incremented.
    So, if this function is called 3 times with parameter 'a.txt' the results
    would be (in this order): a.txt, a(1).txt, a(2).txt .
    '''
    if file not in FILE_LIST:
        FILE_LIST.append(file)
        return file
    i = 0
    f = file
    while f in FILE_LIST:
        i += 1
        name, ext = os.path.splitext(file)
        f = '%s(%d)%s' % (name, i, ext)
    FILE_LIST.append(f)
    return f


def delete_folders(folders):
    '''Deletes the folders passed in. The parameter folders should be an
    iterable that contains which folders should be deleted.'''
    for folder in folders:
        log("Deleting folder %s" % folder)
        # NOTE: os.rmdir is used here on purpose instead of shutil.rmtree,
        #       because the folders should be empty. Once the exclude
        #       mechanism is implemented later, this should be changed to
        #       shutil.rmtree
        os.rmdir(folder)


def main():
    global ROOT
    global options
    parser = get_opt_parser()
    (options, args) = parser.parse_args()
    if len(args) > 1:
        print("ERROR: Only one argument is allowed and that should be folder name")
        parser.print_help()
        sys.exit(2)
    if len(args) == 0:
        folder = "."
    else:
        folder = args[0]

    try:
        ROOT = get_abs_folder(folder)
    except ValueError, e:
        print str(e)
        sys.exit(1)

    VISITED_FOLDERS = []
    os.path.walk(ROOT, walk, VISITED_FOLDERS)

    if options.delete:
        VISITED_FOLDERS.reverse()
        # ROOT should only appear at the last position, but since all the
        # data is now inside it we remove every occurrence just in case
        while ROOT in VISITED_FOLDERS:
            try:
                VISITED_FOLDERS.remove(ROOT)
            except ValueError, e:
                # should not happen, but you never know
                print("Error occurred (%s)" % str(e))
                sys.exit(2)
        delete_folders(VISITED_FOLDERS)


if __name__ == '__main__':
    main()
mit
-5,815,836,849,265,394,000
28.550388
83
0.59731
false
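The docstring of generate_unique_file_name() in the record above promises the sequence a.txt, a(1).txt, a(2).txt for repeated names. The self-contained sketch below reproduces that scheme with a local set instead of the script's module-level FILE_LIST, so it can be run on its own:

# Self-contained sketch of the unique-name scheme: repeated names get a
# "(n)" suffix inserted before the extension.
import os

def unique_name(name, seen):
    if name not in seen:
        seen.add(name)
        return name
    base, ext = os.path.splitext(name)
    i = 1
    candidate = '%s(%d)%s' % (base, i, ext)
    while candidate in seen:
        i += 1
        candidate = '%s(%d)%s' % (base, i, ext)
    seen.add(candidate)
    return candidate

seen = set()
print([unique_name('a.txt', seen) for _ in range(3)])
# ['a.txt', 'a(1).txt', 'a(2).txt']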
benmuresan/django_work
tango_with_django_project/rango/forms.py
1
1840
from django import forms from django.contrib.auth.models import User from rango.models import Page, Category, UserProfile class CategoryForm(forms.ModelForm): name = forms.CharField(max_length=128, help_text="Please enter the category name.") views = forms.IntegerField(widget=forms.HiddenInput(), initial=0) likes = forms.IntegerField(widget=forms.HiddenInput(), initial=0) slug = forms.CharField(widget=forms.HiddenInput(), required=False) # An inline class to provide additional information on the form. class Meta: # Provide an association between the ModelForm and a model model = Category fields = ('name',) class PageForm(forms.ModelForm): title = forms.CharField(max_length=128, help_text="Please enter the title of the page.") url = forms.URLField(max_length=200, help_text="Please enter the URL of the page.") views = forms.IntegerField(widget=forms.HiddenInput(), initial=0) class Meta: # Provide an association between the ModelForm and a model model = Page # What fields do we want to include in our form? # This way we don't need every field in the model present. # Some fields may allow NULL values, so we may not want to include them... # Here, we are hiding the foreign key. # we can either exclude the category field from the form, exclude = ('category',) #or specify the fields to include (i.e. not include the category field) #fields = ('title', 'url', 'views') class UserForm(forms.ModelForm): password = forms.CharField(widget=forms.PasswordInput()) class Meta: model = User fields = ('username', 'email', 'password') class UserProfileForm(forms.ModelForm): class Meta: model = UserProfile fields = ('website', 'picture')
mit
539,592,929,100,364,800
39.021739
92
0.681522
false
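The comments in PageForm above explain the usual ModelForm choices (hidden fields, exclude vs. fields). To try the validation behaviour outside a full project, the sketch below configures Django standalone and uses a plain forms.Form with the same field definitions; this is a deliberate simplification, since the real CategoryForm is a ModelForm tied to the rango models and a database, which a standalone script cannot provide.

# Standalone sketch: plain Form with the same field definitions, so no
# models, app registry or database are needed.  Requires Django installed.
import django
from django.conf import settings

settings.configure()        # default settings suffice for form validation
django.setup()

from django import forms

class CategoryFormSketch(forms.Form):
    name = forms.CharField(max_length=128, help_text="Please enter the category name.")
    views = forms.IntegerField(widget=forms.HiddenInput(), initial=0)
    likes = forms.IntegerField(widget=forms.HiddenInput(), initial=0)

form = CategoryFormSketch(data={'name': 'Python', 'views': 0, 'likes': 0})
print(form.is_valid())      # True
print(form.cleaned_data)    # {'name': 'Python', 'views': 0, 'likes': 0}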
shanestafford/moose
python/FactorySystem/Parser.py
3
3723
#!/usr/bin/python import os, sys, re import ParseGetPot, Factory from MooseObject import MooseObject from Warehouse import Warehouse """ Parser object for reading GetPot formatted files """ class Parser: def __init__(self, factory, warehouse): self.factory = factory self.warehouse = warehouse self.params_parsed = set() self.params_ignored = set() """ Parse the passed filename filling the warehouse with populated InputParameter objects Error codes: 0x00 - Success 0x01 - pyGetpot parsing error 0x02 - Unrecogonized Boolean key/value pair 0x04 - Missing required parameter """ def parse(self, filename): error_code = 0x00 try: root = ParseGetPot.readInputFile(filename) except: print "Parse Error: " + filename return 0x01 # Parse Error error_code = self._parseNode(filename, root) if len(self.params_ignored): print 'Warning detected when parsing file "' + os.path.join(os.getcwd(), filename) + '"' print ' Ignored Parameter(s): ', self.params_ignored return error_code def extractParams(self, filename, params, getpot_node): error_code = 0x00 full_name = getpot_node.fullName() # Populate all of the parameters of this test node # using the GetPotParser. We'll loop over the parsed node # so that we can keep track of ignored parameters as well local_parsed = set() for key, value in getpot_node.params.iteritems(): self.params_parsed.add(full_name + '/' + key) local_parsed.add(key) if key in params: if params.type(key) == list: params[key] = value.split(' ') else: if re.match('".*"', value): # Strip quotes params[key] = value[1:-1] else: # Prevent bool types from being stored as strings. This can lead to the # strange situation where string('False') evaluates to true... if params.isValid(key) and (type(params[key]) == type(bool())): # We support using the case-insensitive strings {true, false} and the string '0', '1'. if (value.lower()=='true') or (value=='1'): params[key] = True elif (value.lower()=='false') or (value=='0'): params[key] = False else: print "Unrecognized (key,value) pair: (", key, ',', value, ")" return 0x02 # Otherwise, just do normal assignment else: params[key] = value else: self.params_ignored.add(key) # Make sure that all required parameters are supplied required_params_missing = params.required_keys() - local_parsed if len(required_params_missing): print 'Error detected when parsing file "' + os.path.join(os.getcwd(), filename) + '"' print ' Required Missing Parameter(s): ', required_params_missing error_code = 0x04 # Missing required params return error_code # private: def _parseNode(self, filename, node): error_code = 0x00 if 'type' in node.params: moose_type = node.params['type'] # Get the valid Params for this type params = self.factory.validParams(moose_type) # Extract the parameters from the Getpot node error_code = error_code | self.extractParams(filename, params, node) # Build the object moose_object = self.factory.create(moose_type, node.name, params) # Put it in the warehouse self.warehouse.addObject(moose_object) # Loop over the section names and parse them for child in node.children_list: error_code = error_code | self._parseNode(filename, node.children[child]) return error_code
lgpl-2.1
6,957,539,235,142,895,000
32.241071
100
0.628525
false
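Parser.parse() in the record above documents its return value as a bit mask (0x01 parse error, 0x02 unrecognized boolean, 0x04 missing required parameter) and the implementation ORs codes together as it recurses. The short sketch below shows how such a combined code can be produced and decoded; the constant names are illustrative, not part of the MOOSE sources.

# Decode a bit-mask error code of the kind returned by Parser.parse().
PARSE_ERROR      = 0x01
BAD_BOOLEAN      = 0x02
MISSING_REQUIRED = 0x04

def describe(error_code):
    problems = []
    if error_code & PARSE_ERROR:
        problems.append('GetPot parse error')
    if error_code & BAD_BOOLEAN:
        problems.append('unrecognized boolean key/value pair')
    if error_code & MISSING_REQUIRED:
        problems.append('missing required parameter')
    return problems or ['success']

print(describe(0x00))          # ['success']
print(describe(0x02 | 0x04))   # both problems reported together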
spraints/for-example
hgext/convert/cvsps.py
4
31758
# Mercurial built-in replacement for cvsps. # # Copyright 2008, Frank Kingswood <[email protected]> # # This software may be used and distributed according to the terms of the # GNU General Public License version 2 or any later version. import os import re import cPickle as pickle from mercurial import util from mercurial.i18n import _ from mercurial import hook from mercurial import util class logentry(object): '''Class logentry has the following attributes: .author - author name as CVS knows it .branch - name of branch this revision is on .branches - revision tuple of branches starting at this revision .comment - commit message .commitid - CVS commitid or None .date - the commit date as a (time, tz) tuple .dead - true if file revision is dead .file - Name of file .lines - a tuple (+lines, -lines) or None .parent - Previous revision of this entry .rcs - name of file as returned from CVS .revision - revision number as tuple .tags - list of tags on the file .synthetic - is this a synthetic "file ... added on ..." revision? .mergepoint - the branch that has been merged from (if present in rlog output) or None .branchpoints - the branches that start at the current entry or empty ''' def __init__(self, **entries): self.synthetic = False self.__dict__.update(entries) def __repr__(self): items = ("%s=%r"%(k, self.__dict__[k]) for k in sorted(self.__dict__)) return "%s(%s)"%(type(self).__name__, ", ".join(items)) class logerror(Exception): pass def getrepopath(cvspath): """Return the repository path from a CVS path. >>> getrepopath('/foo/bar') '/foo/bar' >>> getrepopath('c:/foo/bar') '/foo/bar' >>> getrepopath(':pserver:10/foo/bar') '/foo/bar' >>> getrepopath(':pserver:10c:/foo/bar') '/foo/bar' >>> getrepopath(':pserver:/foo/bar') '/foo/bar' >>> getrepopath(':pserver:c:/foo/bar') '/foo/bar' >>> getrepopath(':pserver:[email protected]:/foo/bar') '/foo/bar' >>> getrepopath(':pserver:[email protected]:c:/foo/bar') '/foo/bar' >>> getrepopath('user@server/path/to/repository') '/path/to/repository' """ # According to CVS manual, CVS paths are expressed like: # [:method:][[user][:password]@]hostname[:[port]]/path/to/repository # # CVSpath is splitted into parts and then position of the first occurrence # of the '/' char after the '@' is located. The solution is the rest of the # string after that '/' sign including it parts = cvspath.split(':') atposition = parts[-1].find('@') start = 0 if atposition != -1: start = atposition repopath = parts[-1][parts[-1].find('/', start):] return repopath def createlog(ui, directory=None, root="", rlog=True, cache=None): '''Collect the CVS rlog''' # Because we store many duplicate commit log messages, reusing strings # saves a lot of memory and pickle storage space. 
_scache = {} def scache(s): "return a shared version of a string" return _scache.setdefault(s, s) ui.status(_('collecting CVS rlog\n')) log = [] # list of logentry objects containing the CVS state # patterns to match in CVS (r)log output, by state of use re_00 = re.compile('RCS file: (.+)$') re_01 = re.compile('cvs \\[r?log aborted\\]: (.+)$') re_02 = re.compile('cvs (r?log|server): (.+)\n$') re_03 = re.compile("(Cannot access.+CVSROOT)|" "(can't create temporary directory.+)$") re_10 = re.compile('Working file: (.+)$') re_20 = re.compile('symbolic names:') re_30 = re.compile('\t(.+): ([\\d.]+)$') re_31 = re.compile('----------------------------$') re_32 = re.compile('=======================================' '======================================$') re_50 = re.compile('revision ([\\d.]+)(\s+locked by:\s+.+;)?$') re_60 = re.compile(r'date:\s+(.+);\s+author:\s+(.+);\s+state:\s+(.+?);' r'(\s+lines:\s+(\+\d+)?\s+(-\d+)?;)?' r'(\s+commitid:\s+([^;]+);)?' r'(.*mergepoint:\s+([^;]+);)?') re_70 = re.compile('branches: (.+);$') file_added_re = re.compile(r'file [^/]+ was (initially )?added on branch') prefix = '' # leading path to strip of what we get from CVS if directory is None: # Current working directory # Get the real directory in the repository try: prefix = open(os.path.join('CVS','Repository')).read().strip() directory = prefix if prefix == ".": prefix = "" except IOError: raise logerror(_('not a CVS sandbox')) if prefix and not prefix.endswith(os.sep): prefix += os.sep # Use the Root file in the sandbox, if it exists try: root = open(os.path.join('CVS','Root')).read().strip() except IOError: pass if not root: root = os.environ.get('CVSROOT', '') # read log cache if one exists oldlog = [] date = None if cache: cachedir = os.path.expanduser('~/.hg.cvsps') if not os.path.exists(cachedir): os.mkdir(cachedir) # The cvsps cache pickle needs a uniquified name, based on the # repository location. The address may have all sort of nasties # in it, slashes, colons and such. So here we take just the # alphanumeric characters, concatenated in a way that does not # mix up the various components, so that # :pserver:user@server:/path # and # /pserver/user/server/path # are mapped to different cache file names. cachefile = root.split(":") + [directory, "cache"] cachefile = ['-'.join(re.findall(r'\w+', s)) for s in cachefile if s] cachefile = os.path.join(cachedir, '.'.join([s for s in cachefile if s])) if cache == 'update': try: ui.note(_('reading cvs log cache %s\n') % cachefile) oldlog = pickle.load(open(cachefile)) for e in oldlog: if not (util.safehasattr(e, 'branchpoints') and util.safehasattr(e, 'commitid') and util.safehasattr(e, 'mergepoint')): ui.status(_('ignoring old cache\n')) oldlog = [] break ui.note(_('cache has %d log entries\n') % len(oldlog)) except Exception, e: ui.note(_('error reading cache: %r\n') % e) if oldlog: date = oldlog[-1].date # last commit date as a (time,tz) tuple date = util.datestr(date, '%Y/%m/%d %H:%M:%S %1%2') # build the CVS commandline cmd = ['cvs', '-q'] if root: cmd.append('-d%s' % root) p = util.normpath(getrepopath(root)) if not p.endswith('/'): p += '/' if prefix: # looks like normpath replaces "" by "." 
prefix = p + util.normpath(prefix) else: prefix = p cmd.append(['log', 'rlog'][rlog]) if date: # no space between option and date string cmd.append('-d>%s' % date) cmd.append(directory) # state machine begins here tags = {} # dictionary of revisions on current file with their tags branchmap = {} # mapping between branch names and revision numbers state = 0 store = False # set when a new record can be appended cmd = [util.shellquote(arg) for arg in cmd] ui.note(_("running %s\n") % (' '.join(cmd))) ui.debug("prefix=%r directory=%r root=%r\n" % (prefix, directory, root)) pfp = util.popen(' '.join(cmd)) peek = pfp.readline() while True: line = peek if line == '': break peek = pfp.readline() if line.endswith('\n'): line = line[:-1] #ui.debug('state=%d line=%r\n' % (state, line)) if state == 0: # initial state, consume input until we see 'RCS file' match = re_00.match(line) if match: rcs = match.group(1) tags = {} if rlog: filename = util.normpath(rcs[:-2]) if filename.startswith(prefix): filename = filename[len(prefix):] if filename.startswith('/'): filename = filename[1:] if filename.startswith('Attic/'): filename = filename[6:] else: filename = filename.replace('/Attic/', '/') state = 2 continue state = 1 continue match = re_01.match(line) if match: raise logerror(match.group(1)) match = re_02.match(line) if match: raise logerror(match.group(2)) if re_03.match(line): raise logerror(line) elif state == 1: # expect 'Working file' (only when using log instead of rlog) match = re_10.match(line) assert match, _('RCS file must be followed by working file') filename = util.normpath(match.group(1)) state = 2 elif state == 2: # expect 'symbolic names' if re_20.match(line): branchmap = {} state = 3 elif state == 3: # read the symbolic names and store as tags match = re_30.match(line) if match: rev = [int(x) for x in match.group(2).split('.')] # Convert magic branch number to an odd-numbered one revn = len(rev) if revn > 3 and (revn % 2) == 0 and rev[-2] == 0: rev = rev[:-2] + rev[-1:] rev = tuple(rev) if rev not in tags: tags[rev] = [] tags[rev].append(match.group(1)) branchmap[match.group(1)] = match.group(2) elif re_31.match(line): state = 5 elif re_32.match(line): state = 0 elif state == 4: # expecting '------' separator before first revision if re_31.match(line): state = 5 else: assert not re_32.match(line), _('must have at least ' 'some revisions') elif state == 5: # expecting revision number and possibly (ignored) lock indication # we create the logentry here from values stored in states 0 to 4, # as this state is re-entered for subsequent revisions of a file. 
match = re_50.match(line) assert match, _('expected revision number') e = logentry(rcs=scache(rcs), file=scache(filename), revision=tuple([int(x) for x in match.group(1).split('.')]), branches=[], parent=None, commitid=None, mergepoint=None, branchpoints=set()) state = 6 elif state == 6: # expecting date, author, state, lines changed match = re_60.match(line) assert match, _('revision must be followed by date line') d = match.group(1) if d[2] == '/': # Y2K d = '19' + d if len(d.split()) != 3: # cvs log dates always in GMT d = d + ' UTC' e.date = util.parsedate(d, ['%y/%m/%d %H:%M:%S', '%Y/%m/%d %H:%M:%S', '%Y-%m-%d %H:%M:%S']) e.author = scache(match.group(2)) e.dead = match.group(3).lower() == 'dead' if match.group(5): if match.group(6): e.lines = (int(match.group(5)), int(match.group(6))) else: e.lines = (int(match.group(5)), 0) elif match.group(6): e.lines = (0, int(match.group(6))) else: e.lines = None if match.group(7): # cvs 1.12 commitid e.commitid = match.group(8) if match.group(9): # cvsnt mergepoint myrev = match.group(10).split('.') if len(myrev) == 2: # head e.mergepoint = 'HEAD' else: myrev = '.'.join(myrev[:-2] + ['0', myrev[-2]]) branches = [b for b in branchmap if branchmap[b] == myrev] assert len(branches) == 1, ('unknown branch: %s' % e.mergepoint) e.mergepoint = branches[0] e.comment = [] state = 7 elif state == 7: # read the revision numbers of branches that start at this revision # or store the commit log message otherwise m = re_70.match(line) if m: e.branches = [tuple([int(y) for y in x.strip().split('.')]) for x in m.group(1).split(';')] state = 8 elif re_31.match(line) and re_50.match(peek): state = 5 store = True elif re_32.match(line): state = 0 store = True else: e.comment.append(line) elif state == 8: # store commit log message if re_31.match(line): cpeek = peek if cpeek.endswith('\n'): cpeek = cpeek[:-1] if re_50.match(cpeek): state = 5 store = True else: e.comment.append(line) elif re_32.match(line): state = 0 store = True else: e.comment.append(line) # When a file is added on a branch B1, CVS creates a synthetic # dead trunk revision 1.1 so that the branch has a root. # Likewise, if you merge such a file to a later branch B2 (one # that already existed when the file was added on B1), CVS # creates a synthetic dead revision 1.1.x.1 on B2. Don't drop # these revisions now, but mark them synthetic so # createchangeset() can take care of them. if (store and e.dead and e.revision[-1] == 1 and # 1.1 or 1.1.x.1 len(e.comment) == 1 and file_added_re.match(e.comment[0])): ui.debug('found synthetic revision in %s: %r\n' % (e.rcs, e.comment[0])) e.synthetic = True if store: # clean up the results and save in the log. 
store = False e.tags = sorted([scache(x) for x in tags.get(e.revision, [])]) e.comment = scache('\n'.join(e.comment)) revn = len(e.revision) if revn > 3 and (revn % 2) == 0: e.branch = tags.get(e.revision[:-1], [None])[0] else: e.branch = None # find the branches starting from this revision branchpoints = set() for branch, revision in branchmap.iteritems(): revparts = tuple([int(i) for i in revision.split('.')]) if len(revparts) < 2: # bad tags continue if revparts[-2] == 0 and revparts[-1] % 2 == 0: # normal branch if revparts[:-2] == e.revision: branchpoints.add(branch) elif revparts == (1, 1, 1): # vendor branch if revparts in e.branches: branchpoints.add(branch) e.branchpoints = branchpoints log.append(e) if len(log) % 100 == 0: ui.status(util.ellipsis('%d %s' % (len(log), e.file), 80)+'\n') log.sort(key=lambda x: (x.rcs, x.revision)) # find parent revisions of individual files versions = {} for e in log: branch = e.revision[:-1] p = versions.get((e.rcs, branch), None) if p is None: p = e.revision[:-2] e.parent = p versions[(e.rcs, branch)] = e.revision # update the log cache if cache: if log: # join up the old and new logs log.sort(key=lambda x: x.date) if oldlog and oldlog[-1].date >= log[0].date: raise logerror(_('log cache overlaps with new log entries,' ' re-run without cache.')) log = oldlog + log # write the new cachefile ui.note(_('writing cvs log cache %s\n') % cachefile) pickle.dump(log, open(cachefile, 'w')) else: log = oldlog ui.status(_('%d log entries\n') % len(log)) hook.hook(ui, None, "cvslog", True, log=log) return log class changeset(object): '''Class changeset has the following attributes: .id - integer identifying this changeset (list index) .author - author name as CVS knows it .branch - name of branch this changeset is on, or None .comment - commit message .commitid - CVS commitid or None .date - the commit date as a (time,tz) tuple .entries - list of logentry objects in this changeset .parents - list of one or two parent changesets .tags - list of tags on this changeset .synthetic - from synthetic revision "file ... added on branch ..." .mergepoint- the branch that has been merged from or None .branchpoints- the branches that start at the current entry or empty ''' def __init__(self, **entries): self.id = None self.synthetic = False self.__dict__.update(entries) def __repr__(self): items = ("%s=%r"%(k, self.__dict__[k]) for k in sorted(self.__dict__)) return "%s(%s)"%(type(self).__name__, ", ".join(items)) def createchangeset(ui, log, fuzz=60, mergefrom=None, mergeto=None): '''Convert log into changesets.''' ui.status(_('creating changesets\n')) # try to order commitids by date mindate = {} for e in log: if e.commitid: mindate[e.commitid] = min(e.date, mindate.get(e.commitid)) # Merge changesets log.sort(key=lambda x: (mindate.get(x.commitid), x.commitid, x.comment, x.author, x.branch, x.date, x.branchpoints)) changesets = [] files = set() c = None for i, e in enumerate(log): # Check if log entry belongs to the current changeset or not. # Since CVS is file-centric, two different file revisions with # different branchpoints should be treated as belonging to two # different changesets (and the ordering is important and not # honoured by cvsps at this point). # # Consider the following case: # foo 1.1 branchpoints: [MYBRANCH] # bar 1.1 branchpoints: [MYBRANCH, MYBRANCH2] # # Here foo is part only of MYBRANCH, but not MYBRANCH2, e.g. 
a # later version of foo may be in MYBRANCH2, so foo should be the # first changeset and bar the next and MYBRANCH and MYBRANCH2 # should both start off of the bar changeset. No provisions are # made to ensure that this is, in fact, what happens. if not (c and e.branchpoints == c.branchpoints and (# cvs commitids (e.commitid is not None and e.commitid == c.commitid) or (# no commitids, use fuzzy commit detection (e.commitid is None or c.commitid is None) and e.comment == c.comment and e.author == c.author and e.branch == c.branch and ((c.date[0] + c.date[1]) <= (e.date[0] + e.date[1]) <= (c.date[0] + c.date[1]) + fuzz) and e.file not in files))): c = changeset(comment=e.comment, author=e.author, branch=e.branch, date=e.date, entries=[], mergepoint=e.mergepoint, branchpoints=e.branchpoints, commitid=e.commitid) changesets.append(c) files = set() if len(changesets) % 100 == 0: t = '%d %s' % (len(changesets), repr(e.comment)[1:-1]) ui.status(util.ellipsis(t, 80) + '\n') c.entries.append(e) files.add(e.file) c.date = e.date # changeset date is date of latest commit in it # Mark synthetic changesets for c in changesets: # Synthetic revisions always get their own changeset, because # the log message includes the filename. E.g. if you add file3 # and file4 on a branch, you get four log entries and three # changesets: # "File file3 was added on branch ..." (synthetic, 1 entry) # "File file4 was added on branch ..." (synthetic, 1 entry) # "Add file3 and file4 to fix ..." (real, 2 entries) # Hence the check for 1 entry here. c.synthetic = len(c.entries) == 1 and c.entries[0].synthetic # Sort files in each changeset def entitycompare(l, r): 'Mimic cvsps sorting order' l = l.file.split('/') r = r.file.split('/') nl = len(l) nr = len(r) n = min(nl, nr) for i in range(n): if i + 1 == nl and nl < nr: return -1 elif i + 1 == nr and nl > nr: return +1 elif l[i] < r[i]: return -1 elif l[i] > r[i]: return +1 return 0 for c in changesets: c.entries.sort(entitycompare) # Sort changesets by date odd = set() def cscmp(l, r, odd=odd): d = sum(l.date) - sum(r.date) if d: return d # detect vendor branches and initial commits on a branch le = {} for e in l.entries: le[e.rcs] = e.revision re = {} for e in r.entries: re[e.rcs] = e.revision d = 0 for e in l.entries: if re.get(e.rcs, None) == e.parent: assert not d d = 1 break for e in r.entries: if le.get(e.rcs, None) == e.parent: if d: odd.add((l, r)) d = -1 break return d changesets.sort(cscmp) # Collect tags globaltags = {} for c in changesets: for e in c.entries: for tag in e.tags: # remember which is the latest changeset to have this tag globaltags[tag] = c for c in changesets: tags = set() for e in c.entries: tags.update(e.tags) # remember tags only if this is the latest changeset to have it c.tags = sorted(tag for tag in tags if globaltags[tag] is c) # Find parent changesets, handle {{mergetobranch BRANCHNAME}} # by inserting dummy changesets with two parents, and handle # {{mergefrombranch BRANCHNAME}} by setting two parents. 
if mergeto is None: mergeto = r'{{mergetobranch ([-\w]+)}}' if mergeto: mergeto = re.compile(mergeto) if mergefrom is None: mergefrom = r'{{mergefrombranch ([-\w]+)}}' if mergefrom: mergefrom = re.compile(mergefrom) versions = {} # changeset index where we saw any particular file version branches = {} # changeset index where we saw a branch n = len(changesets) i = 0 while i < n: c = changesets[i] for f in c.entries: versions[(f.rcs, f.revision)] = i p = None if c.branch in branches: p = branches[c.branch] else: # first changeset on a new branch # the parent is a changeset with the branch in its # branchpoints such that it is the latest possible # commit without any intervening, unrelated commits. for candidate in xrange(i): if c.branch not in changesets[candidate].branchpoints: if p is not None: break continue p = candidate c.parents = [] if p is not None: p = changesets[p] # Ensure no changeset has a synthetic changeset as a parent. while p.synthetic: assert len(p.parents) <= 1, \ _('synthetic changeset cannot have multiple parents') if p.parents: p = p.parents[0] else: p = None break if p is not None: c.parents.append(p) if c.mergepoint: if c.mergepoint == 'HEAD': c.mergepoint = None c.parents.append(changesets[branches[c.mergepoint]]) if mergefrom: m = mergefrom.search(c.comment) if m: m = m.group(1) if m == 'HEAD': m = None try: candidate = changesets[branches[m]] except KeyError: ui.warn(_("warning: CVS commit message references " "non-existent branch %r:\n%s\n") % (m, c.comment)) if m in branches and c.branch != m and not candidate.synthetic: c.parents.append(candidate) if mergeto: m = mergeto.search(c.comment) if m: if m.groups(): m = m.group(1) if m == 'HEAD': m = None else: m = None # if no group found then merge to HEAD if m in branches and c.branch != m: # insert empty changeset for merge cc = changeset( author=c.author, branch=m, date=c.date, comment='convert-repo: CVS merge from branch %s' % c.branch, entries=[], tags=[], parents=[changesets[branches[m]], c]) changesets.insert(i + 1, cc) branches[m] = i + 1 # adjust our loop counters now we have inserted a new entry n += 1 i += 2 continue branches[c.branch] = i i += 1 # Drop synthetic changesets (safe now that we have ensured no other # changesets can have them as parents). i = 0 while i < len(changesets): if changesets[i].synthetic: del changesets[i] else: i += 1 # Number changesets for i, c in enumerate(changesets): c.id = i + 1 if odd: for l, r in odd: if l.id is not None and r.id is not None: ui.warn(_('changeset %d is both before and after %d\n') % (l.id, r.id)) ui.status(_('%d changeset entries\n') % len(changesets)) hook.hook(ui, None, "cvschangesets", True, changesets=changesets) return changesets def debugcvsps(ui, *args, **opts): '''Read CVS rlog for current directory or named path in repository, and convert the log to changesets based on matching commit log entries and dates. 
''' if opts["new_cache"]: cache = "write" elif opts["update_cache"]: cache = "update" else: cache = None revisions = opts["revisions"] try: if args: log = [] for d in args: log += createlog(ui, d, root=opts["root"], cache=cache) else: log = createlog(ui, root=opts["root"], cache=cache) except logerror, e: ui.write("%r\n"%e) return changesets = createchangeset(ui, log, opts["fuzz"]) del log # Print changesets (optionally filtered) off = len(revisions) branches = {} # latest version number in each branch ancestors = {} # parent branch for cs in changesets: if opts["ancestors"]: if cs.branch not in branches and cs.parents and cs.parents[0].id: ancestors[cs.branch] = (changesets[cs.parents[0].id - 1].branch, cs.parents[0].id) branches[cs.branch] = cs.id # limit by branches if opts["branches"] and (cs.branch or 'HEAD') not in opts["branches"]: continue if not off: # Note: trailing spaces on several lines here are needed to have # bug-for-bug compatibility with cvsps. ui.write('---------------------\n') ui.write(('PatchSet %d \n' % cs.id)) ui.write(('Date: %s\n' % util.datestr(cs.date, '%Y/%m/%d %H:%M:%S %1%2'))) ui.write(('Author: %s\n' % cs.author)) ui.write(('Branch: %s\n' % (cs.branch or 'HEAD'))) ui.write(('Tag%s: %s \n' % (['', 's'][len(cs.tags) > 1], ','.join(cs.tags) or '(none)'))) if cs.branchpoints: ui.write(('Branchpoints: %s \n') % ', '.join(sorted(cs.branchpoints))) if opts["parents"] and cs.parents: if len(cs.parents) > 1: ui.write(('Parents: %s\n' % (','.join([str(p.id) for p in cs.parents])))) else: ui.write(('Parent: %d\n' % cs.parents[0].id)) if opts["ancestors"]: b = cs.branch r = [] while b: b, c = ancestors[b] r.append('%s:%d:%d' % (b or "HEAD", c, branches[b])) if r: ui.write(('Ancestors: %s\n' % (','.join(r)))) ui.write(('Log:\n')) ui.write('%s\n\n' % cs.comment) ui.write(('Members: \n')) for f in cs.entries: fn = f.file if fn.startswith(opts["prefix"]): fn = fn[len(opts["prefix"]):] ui.write('\t%s:%s->%s%s \n' % ( fn, '.'.join([str(x) for x in f.parent]) or 'INITIAL', '.'.join([str(x) for x in f.revision]), ['', '(DEAD)'][f.dead])) ui.write('\n') # have we seen the start tag? if revisions and off: if revisions[0] == str(cs.id) or \ revisions[0] in cs.tags: off = False # see if we reached the end tag if len(revisions) > 1 and not off: if revisions[1] == str(cs.id) or \ revisions[1] in cs.tags: break
gpl-2.0
-4,641,087,352,338,654,000
34.844244
80
0.491971
false
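createchangeset() in the record above merges per-file CVS log entries into changesets: entries that share a commitid, or that share author, branch and commit message and fall within `fuzz` seconds without repeating a file, end up in one changeset. The toy sketch below shows only that fuzzy-window grouping on plain tuples; commitids, branchpoints and the synthetic-revision handling are deliberately omitted, and the sample entries are invented.

# Toy illustration of the fuzzy grouping performed by createchangeset().
fuzz = 60

# (file, author, branch, comment, unix_time) -- made-up sample entries
entries = [
    ('a.c', 'frank', None, 'fix overflow', 1000),
    ('b.c', 'frank', None, 'fix overflow', 1030),
    ('c.c', 'frank', None, 'fix overflow', 1200),   # too late: new changeset
    ('a.c', 'jane',  None, 'new feature',  1210),
]

changesets = []
current = None
for f, author, branch, comment, date in sorted(entries, key=lambda e: e[-1]):
    if (current
            and (author, branch, comment) == current['key']
            and date - current['start'] <= fuzz
            and f not in current['files']):
        current['files'].add(f)
    else:
        current = {'key': (author, branch, comment), 'start': date, 'files': {f}}
        changesets.append(current)

for i, c in enumerate(changesets, 1):
    print(i, c['key'][2], sorted(c['files']))
# -> three changesets: the first two entries merge, the late one and the
#    different author each start a new changeset.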
jjellis/marvin
Marvin/app/emboss.py
1
3536
import os from subprocess import Popen, PIPE import wx import wx.richtext as richtext class EmbossFrame(wx.Frame): def __init__(self, parent, topframe, id=wx.ID_ANY, title='EMBOSS'): wx.Frame.__init__(self, parent, id, title) self.topframe = topframe try: self.nucseq = topframe.builder.nucleotide_seq.tostring() self.protseq = topframe.builder.protein_seq.tostring() except: self.OnClose(None) vbox = wx.BoxSizer(wx.VERTICAL) vbox.Add(wx.Button(self, id=501, label='REMAP'), 0, wx.ALL, 10) vbox.Add(wx.Button(self, id=502, label='RESTRICT'), 0, wx.LEFT | wx.RIGHT, 10) vbox.Add(wx.Button(self, id=wx.ID_CLOSE, label='Close'), 0, wx.ALL, 10) self.SetSizerAndFit(vbox) self.Centre() #self.Fit() self.Bind(wx.EVT_BUTTON, self.Remap, id=501) self.Bind(wx.EVT_BUTTON, self.Restrict, id=502) self.Bind(wx.EVT_BUTTON, self.OnClose, id=wx.ID_CLOSE) def OnClose(self, event): self.Destroy() def Remap(self, event): p = Popen(['remap', '-filter', '-stdout', '-auto'], stdin=PIPE, stdout=PIPE, stderr=PIPE) stdout, stderr = p.communicate(input=self.nucseq) if p.returncode == 0: e = EmbossOutputFrame(self, title='REMAP', output=stdout) e.Show(True) def Restrict(self, event): p = Popen(['restrict', '-filter', '-alphabetic', '-stdout', '-auto'], stdin=PIPE, stdout=PIPE, stderr=PIPE) stdout, stderr = p.communicate(input=self.nucseq) if p.returncode == 0: e = EmbossOutputFrame(self, title='RESTRICT', output=stdout, size=(900, 700)) e.Show(True) class EmbossOutputFrame(wx.Frame): def __init__(self, parent, id=wx.ID_ANY, title='', size=(700, 700), output=None): wx.Frame.__init__(self, parent, id, title, size=size) save_bt = wx.Button(self, id=wx.ID_SAVE, label='Save') close_bt = wx.Button(self, id=wx.ID_CLOSE, label='Close') self.text = richtext.RichTextCtrl(self) self.text.SetFont(wx.Font(10, wx.MODERN, wx.NORMAL, wx.NORMAL)) self.text.AppendText(output) self.text.SetBackgroundColour('WHITE') hbox = wx.BoxSizer(wx.HORIZONTAL) hbox.Add(save_bt, 0, wx.LEFT | wx.RIGHT, 5) hbox.Add(close_bt, 0, wx.LEFT | wx.RIGHT, 5) vbox = wx.BoxSizer(wx.VERTICAL) vbox.Add(hbox, 0, wx.ALL, 10) vbox.Add(self.text, 1, wx.EXPAND) self.SetSizer(vbox) self.Centre() self.Bind(wx.EVT_BUTTON, self.OnClose, id=wx.ID_CLOSE) self.Bind(wx.EVT_BUTTON, self.OnSave, id=wx.ID_SAVE) def OnClose(self, event): self.Destroy() def OnSave(self, event): wild = 'Text files|*.txt' dia = wx.FileDialog(self, message='Save file as...', defaultDir=os.getcwd(), defaultFile='', wildcard=wild, style=wx.SAVE | wx.OVERWRITE_PROMPT) if dia.ShowModal() == wx.ID_OK: path = dia.GetPath() try: f_out = open(path, 'w') f_out.write(self.text.GetValue()) f_out.close() except IOError as err: pass dia.Destroy()
gpl-3.0
-6,765,850,251,319,605,000
32.358491
77
0.538179
false
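EmbossFrame.Restrict() in the record above shells out to the EMBOSS `restrict` binary with -filter/-alphabetic/-stdout/-auto and feeds the nucleotide sequence on stdin. The same call, rewritten with subprocess.run for a quick command-line check, is sketched below; it assumes the EMBOSS tools are on PATH, the flags are taken directly from the record, and the sequence literal is a made-up placeholder.

# Command-line wrapping pattern used by EmbossFrame, without wxPython.
import subprocess

nucseq = 'GAATTCGCGGCCGCAAGCTT'   # illustrative nucleotide sequence

proc = subprocess.run(
    ['restrict', '-filter', '-alphabetic', '-stdout', '-auto'],
    input=nucseq.encode(),
    stdout=subprocess.PIPE,
    stderr=subprocess.PIPE,
)
if proc.returncode == 0:
    print(proc.stdout.decode())
else:
    print('restrict failed:', proc.stderr.decode())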
alfa-addon/addon
plugin.video.alfa/servers/mediafire.py
2
1261
# -*- coding: utf-8 -*- from core import httptools from core import scrapertools from platformcode import logger def test_video_exists(page_url): logger.info("(page_url='%s')" % page_url) data = httptools.downloadpage(page_url).data if "Invalid or Deleted File" in data or "Well, looks like we" in data: return False, "[Mediafire] El archivo no existe o ha sido borrado" if "File Removed for Violation" in data: return False, "[Mediafire] Archivo eliminado por infracción" return True, "" def get_video_url(page_url, premium=False, user="", password="", video_password=""): logger.info("(page_url='%s')" % page_url) video_urls = [] data = httptools.downloadpage(page_url).data patron = "DownloadButtonAd-startDownload gbtnSecondary.*?href='([^']+)'" matches = scrapertools.find_multiple_matches(data, patron) if len(matches) == 0: patron = 'Download file.*?href="([^"]+)"' matches = scrapertools.find_multiple_matches(data, patron) if len(matches) > 0: video_urls.append([matches[0][-4:] + " [mediafire]", matches[0]]) for video_url in video_urls: logger.info("%s - %s" % (video_url[0], video_url[1])) return video_urls
gpl-3.0
7,863,214,625,377,572,000
38.645161
84
0.63254
false
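get_video_url() in the record above is essentially two regular-expression searches over the downloaded page. The sketch below runs the second pattern with the standard re module against a tiny invented HTML fragment (real MediaFire markup will differ), to show what scrapertools.find_multiple_matches extracts and how the extension label is built.

# Run the fallback pattern from get_video_url() over a made-up fragment.
import re

data = '<a class="input popsok" aria-label="Download file" href="https://download0000.mediafire.com/x/file.mp4">Download</a>'

patron = 'Download file.*?href="([^"]+)"'
matches = re.findall(patron, data)

video_urls = []
if matches:
    video_urls.append([matches[0][-4:] + ' [mediafire]', matches[0]])
print(video_urls)
# [['.mp4 [mediafire]', 'https://download0000.mediafire.com/x/file.mp4']]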
lmazuel/azure-sdk-for-python
azure-mgmt-devtestlabs/azure/mgmt/devtestlabs/models/user_secret_store.py
1
1123
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- from msrest.serialization import Model class UserSecretStore(Model): """Properties of a user's secret store. :param key_vault_uri: The URI of the user's Key vault. :type key_vault_uri: str :param key_vault_id: The ID of the user's Key vault. :type key_vault_id: str """ _attribute_map = { 'key_vault_uri': {'key': 'keyVaultUri', 'type': 'str'}, 'key_vault_id': {'key': 'keyVaultId', 'type': 'str'}, } def __init__(self, key_vault_uri=None, key_vault_id=None): super(UserSecretStore, self).__init__() self.key_vault_uri = key_vault_uri self.key_vault_id = key_vault_id
mit
-6,176,011,524,582,688,000
34.09375
76
0.569012
false
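The _attribute_map in UserSecretStore above tells msrest how to rename Python attributes (key_vault_uri) into wire-format keys (keyVaultUri) during serialization. The sketch below applies that mapping by hand so it runs without msrest; the vault URI is an invented example, and the real msrest Serializer additionally handles typing, nesting and validation.

# Hand-rolled version of the attribute-map-driven serialization idea.
class UserSecretStore(object):
    _attribute_map = {
        'key_vault_uri': {'key': 'keyVaultUri', 'type': 'str'},
        'key_vault_id': {'key': 'keyVaultId', 'type': 'str'},
    }

    def __init__(self, key_vault_uri=None, key_vault_id=None):
        self.key_vault_uri = key_vault_uri
        self.key_vault_id = key_vault_id

def to_wire(model):
    # rename attributes to wire keys, skipping unset values
    return {meta['key']: getattr(model, attr)
            for attr, meta in model._attribute_map.items()
            if getattr(model, attr) is not None}

store = UserSecretStore(key_vault_uri='https://example.vault.azure.net/')
print(to_wire(store))   # {'keyVaultUri': 'https://example.vault.azure.net/'}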
chemelnucfin/tensorflow
tensorflow/lite/python/lite_mlir_test.py
1
20194
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for lite.py functionality related to MLIR-TFLite converter.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.lite.python import lite from tensorflow.lite.python import lite_constants from tensorflow.lite.python.interpreter import Interpreter from tensorflow.python import keras from tensorflow.python.client import session from tensorflow.python.eager import def_function from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_spec from tensorflow.python.framework import test_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import rnn from tensorflow.python.ops import rnn_cell_impl from tensorflow.python.ops import variables from tensorflow.python.platform import test from tensorflow.python.training.tracking import tracking class FromSessionTest(test_util.TensorFlowTestCase): def testFloat(self): with ops.Graph().as_default(): in_tensor = array_ops.placeholder( shape=[1, 16, 16, 3], dtype=dtypes.float32) out_tensor = in_tensor + in_tensor sess = session.Session() # Convert model and ensure model is not None. converter = lite.TFLiteConverter.from_session(sess, [in_tensor], [out_tensor]) converter.experimental_enable_mlir_converter = True tflite_model = converter.convert() # Check values from converted model. interpreter = Interpreter(model_content=tflite_model) interpreter.allocate_tensors() input_details = interpreter.get_input_details() self.assertEqual(1, len(input_details)) self.assertEqual('Placeholder', input_details[0]['name']) self.assertEqual(np.float32, input_details[0]['dtype']) self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all()) self.assertEqual((0., 0.), input_details[0]['quantization']) output_details = interpreter.get_output_details() self.assertEqual(1, len(output_details)) self.assertEqual('add', output_details[0]['name']) self.assertEqual(np.float32, output_details[0]['dtype']) self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all()) self.assertEqual((0., 0.), output_details[0]['quantization']) def testString(self): with ops.Graph().as_default(): in_tensor = array_ops.placeholder(shape=[4], dtype=dtypes.string) out_tensor = array_ops.reshape(in_tensor, shape=[2, 2]) sess = session.Session() # Convert model and ensure model is not None. converter = lite.TFLiteConverter.from_session(sess, [in_tensor], [out_tensor]) converter.experimental_enable_mlir_converter = True tflite_model = converter.convert() # Check values from converted model. 
interpreter = Interpreter(model_content=tflite_model) interpreter.allocate_tensors() input_details = interpreter.get_input_details() self.assertEqual(1, len(input_details)) self.assertEqual('Placeholder', input_details[0]['name']) self.assertEqual(np.string_, input_details[0]['dtype']) self.assertTrue(([4] == input_details[0]['shape']).all()) output_details = interpreter.get_output_details() self.assertEqual(1, len(output_details)) self.assertEqual('Reshape', output_details[0]['name']) self.assertEqual(np.string_, output_details[0]['dtype']) self.assertTrue(([2, 2] == output_details[0]['shape']).all()) def testQuantization(self): with ops.Graph().as_default(): in_tensor_1 = array_ops.placeholder( shape=[1, 16, 16, 3], dtype=dtypes.float32, name='inputA') in_tensor_2 = array_ops.placeholder( shape=[1, 16, 16, 3], dtype=dtypes.float32, name='inputB') out_tensor = array_ops.fake_quant_with_min_max_args( in_tensor_1 + in_tensor_2, min=0., max=1., name='output') sess = session.Session() # Convert model and ensure model is not None. converter = lite.TFLiteConverter.from_session(sess, [in_tensor_1, in_tensor_2], [out_tensor]) converter.experimental_enable_mlir_converter = True converter.inference_type = lite_constants.QUANTIZED_UINT8 converter.quantized_input_stats = { 'inputA': (0., 1.), 'inputB': (0., 1.) } # mean, std_dev tflite_model = converter.convert() # Check values from converted model. interpreter = Interpreter(model_content=tflite_model) interpreter.allocate_tensors() input_details = interpreter.get_input_details() self.assertEqual(2, len(input_details)) self.assertEqual('inputA', input_details[0]['name']) self.assertEqual(np.uint8, input_details[0]['dtype']) self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all()) self.assertEqual((1., 0.), input_details[0]['quantization']) # scale, zero_point self.assertEqual('inputB', input_details[1]['name']) self.assertEqual(np.uint8, input_details[1]['dtype']) self.assertTrue(([1, 16, 16, 3] == input_details[1]['shape']).all()) self.assertEqual((1., 0.), input_details[1]['quantization']) # scale, zero_point output_details = interpreter.get_output_details() self.assertEqual(1, len(output_details)) self.assertEqual('add', output_details[0]['name']) self.assertEqual(np.uint8, output_details[0]['dtype']) self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all()) self.assertGreater(output_details[0]['quantization'][0], 0) # scale def testScalarValid(self): # Construct a graph using a scalar (empty shape) input. with ops.Graph().as_default(): in_tensor = array_ops.placeholder(dtype=dtypes.float32, shape=[]) out_tensor = in_tensor + in_tensor sess = session.Session() # Test conversion with the scalar input shape. converter = lite.TFLiteConverter.from_session(sess, [in_tensor], [out_tensor]) converter.experimental_enable_mlir_converter = True tflite_model = converter.convert() # Check values from converted model. 
interpreter = Interpreter(model_content=tflite_model) interpreter.allocate_tensors() input_details = interpreter.get_input_details() self.assertEqual(1, len(input_details)) self.assertEqual('Placeholder', input_details[0]['name']) self.assertEqual(np.float32, input_details[0]['dtype']) self.assertEqual(len(input_details[0]['shape']), 0) output_details = interpreter.get_output_details() self.assertEqual(1, len(output_details)) self.assertEqual('add', output_details[0]['name']) self.assertEqual(np.float32, output_details[0]['dtype']) self.assertEqual(len(output_details[0]['shape']), 0) # Validate inference using the scalar inputs/outputs. test_input = np.array(4.0, dtype=np.float32) expected_output = np.array(8.0, dtype=np.float32) interpreter.set_tensor(input_details[0]['index'], test_input) interpreter.invoke() output_data = interpreter.get_tensor(output_details[0]['index']) self.assertTrue((expected_output == output_data).all()) def testPostTrainingQuantize(self): self.skipTest('b/124315492') np.random.seed(0) with ops.Graph().as_default(): # We need the tensor to have more than 1024 elements for quantize_weights # to kick in. Thus, the [33, 33] shape. in_tensor_1 = array_ops.placeholder( shape=[33, 33], dtype=dtypes.float32, name='inputA') in_tensor_2 = constant_op.constant( np.random.uniform(low=-10., high=10., size=(33, 33)), shape=[33, 33], dtype=dtypes.float32, name='inputB') out_tensor = math_ops.matmul(in_tensor_1, in_tensor_2, name='output') sess = session.Session() # Convert float model. float_converter = lite.TFLiteConverter.from_session(sess, [in_tensor_1], [out_tensor]) float_converter.experimental_enable_mlir_converter = True float_tflite = float_converter.convert() # Convert quantized weights model. quantized_converter = lite.TFLiteConverter.from_session( sess, [in_tensor_1], [out_tensor]) quantized_converter.experimental_enable_mlir_converter = True quantized_converter.optimizations = [lite.Optimize.DEFAULT] quantized_tflite = quantized_converter.convert() # Ensure that the quantized weights tflite model is smaller. self.assertLess(len(quantized_tflite), len(float_tflite)) @test_util.run_in_graph_and_eager_modes def testFunctions(self): """Tests tf.function in 1.X.""" @def_function.function def plus_placeholder(x, placeholder): return x + placeholder with ops.Graph().as_default(): placeholder = array_ops.placeholder( dtype=dtypes.float32, shape=[1], name='input') variable_node = variables.Variable(1.0, name='variable_node') defun_node = plus_placeholder(variable_node, placeholder) output_node = math_ops.multiply(defun_node, 2.0, name='output_node') # Initialize variables in the model. sess = session.Session() sess.run(variables.variables_initializer([variable_node])) # Convert model and ensure model is not None. converter = lite.TFLiteConverter.from_session(sess, [placeholder], [output_node]) converter.experimental_enable_mlir_converter = True tflite_model = converter.convert() # Check values from converted model. 
interpreter = Interpreter(model_content=tflite_model) interpreter.allocate_tensors() input_details = interpreter.get_input_details() self.assertEqual(1, len(input_details)) self.assertEqual('input', input_details[0]['name']) self.assertEqual(np.float32, input_details[0]['dtype']) self.assertTrue(([1] == input_details[0]['shape']).all()) self.assertEqual((0., 0.), input_details[0]['quantization']) output_details = interpreter.get_output_details() self.assertEqual(1, len(output_details)) self.assertEqual('output_node', output_details[0]['name']) self.assertEqual(np.float32, output_details[0]['dtype']) self.assertTrue(([1] == output_details[0]['shape']).all()) self.assertEqual((0., 0.), output_details[0]['quantization']) class FromConcreteFunctionTest(test_util.TensorFlowTestCase): def _evaluateTFLiteModel(self, tflite_model, input_data): """Evaluates the model on the `input_data`.""" interpreter = Interpreter(model_content=tflite_model) interpreter.allocate_tensors() input_details = interpreter.get_input_details() output_details = interpreter.get_output_details() for input_tensor, tensor_data in zip(input_details, input_data): interpreter.set_tensor(input_tensor['index'], tensor_data.numpy()) interpreter.invoke() return [ interpreter.get_tensor(details['index']) for details in output_details ] def _getSimpleVariableModel(self): root = tracking.AutoTrackable() root.v1 = variables.Variable(3.) root.v2 = variables.Variable(2.) root.f = def_function.function(lambda x: root.v1 * root.v2 * x) return root @test_util.run_v2_only def testFloat(self): root = self._getSimpleVariableModel() input_data = constant_op.constant(1., shape=[1]) concrete_func = root.f.get_concrete_function(input_data) # Convert model. converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func]) converter.experimental_enable_mlir_converter = True tflite_model = converter.convert() # Check values from converted model. expected_value = root.f(input_data) actual_value = self._evaluateTFLiteModel(tflite_model, [input_data]) self.assertEqual(expected_value.numpy(), actual_value) @test_util.run_v2_only def testControlFlow(self): input_data = { 'x': constant_op.constant([1., 2.], shape=[1, 2]), 'b': constant_op.constant(True) } weights = variables.Variable([[0.1, 0.2], [0.3, 0.4]], dtype=dtypes.float32) def true_fn(x): return math_ops.matmul(x, weights) def false_fn(x): return math_ops.add(x, weights) @def_function.function(input_signature=[ tensor_spec.TensorSpec(shape=[1, 2], dtype=dtypes.float32), tensor_spec.TensorSpec(shape=(), dtype=dtypes.bool) ]) def model(x, b): return control_flow_ops.cond( b, true_fn=lambda: true_fn(x), false_fn=lambda: false_fn(x)) concrete_func = model.get_concrete_function() # Convert model. converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func]) converter.experimental_enable_mlir_converter = True tflite_model = converter.convert() # Check values from converted model. 
expected_value = concrete_func(**input_data) actual_value = self._evaluateTFLiteModel( tflite_model, [input_data['x'], input_data['b']])[0] np.testing.assert_almost_equal(expected_value.numpy(), actual_value) @test_util.run_v2_only def testStaticRnn(self): input_data = constant_op.constant( np.array(np.random.random_sample((3, 10)), dtype=np.float32)) cell = rnn_cell_impl.LSTMCell(10) @def_function.function(input_signature=[ tensor_spec.TensorSpec(shape=[3, 10], dtype=dtypes.float32) ]) def model(x): seq = array_ops.split(x, 3, 0) return rnn.static_rnn( cell, seq, dtype=dtypes.float32, sequence_length=[1]) concrete_func = model.get_concrete_function() # Convert model. converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func]) converter.experimental_enable_mlir_converter = True tflite_model = converter.convert() # Check values from converted model. expected_value = concrete_func(input_data)[0] actual_value = self._evaluateTFLiteModel(tflite_model, [input_data]) for expected, actual in zip(expected_value, actual_value): np.testing.assert_almost_equal(expected.numpy(), actual) @test_util.run_v2_only def testLoop(self): input_data = constant_op.constant([1., 2., 3., 4.], shape=[2, 2]) weights = variables.Variable([[0.1, 0.2], [0.3, 0.4]], dtype=dtypes.float32) def condition(x): return math_ops.reduce_sum(x) < 100 def body(x): return math_ops.add(x, weights) @def_function.function(input_signature=[ tensor_spec.TensorSpec(shape=[2, 2], dtype=dtypes.float32) ]) def model(x): return control_flow_ops.while_loop(condition, body, [x]) concrete_func = model.get_concrete_function() # Convert model. converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func]) converter.experimental_enable_mlir_converter = True tflite_model = converter.convert() # Check values from converted model. expected_value = concrete_func(input_data) actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])[0] np.testing.assert_almost_equal(expected_value.numpy(), actual_value) @test_util.run_v2_only def testDynamicRnn(self): input_data = constant_op.constant( np.array(np.random.random_sample((3, 10, 10)), dtype=np.float32)) cell = rnn_cell_impl.LSTMCell(10) @def_function.function(input_signature=[ tensor_spec.TensorSpec(shape=[3, 10, 10], dtype=dtypes.float32) ]) def model(x): return rnn.dynamic_rnn(cell, x, dtype=dtypes.float32) concrete_func = model.get_concrete_function() # Convert model. converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func]) converter.experimental_enable_mlir_converter = True tflite_model = converter.convert() # Check values from converted model. expected_value = concrete_func(input_data) actual_value = self._evaluateTFLiteModel(tflite_model, [input_data]) for expected, actual in zip(expected_value, actual_value): if isinstance(expected, ops.EagerTensor): expected = expected.numpy() else: expected = expected.c.numpy() np.testing.assert_almost_equal(expected, actual) @test_util.run_v2_only def testKerasLSTM(self): self.skipTest('b/138657502') input_data = constant_op.constant( np.array(np.random.random_sample((10, 10, 10)), dtype=np.float32)) model = keras.models.Sequential( [keras.layers.LSTM(units=10, input_shape=(10, 10))]) run_model = def_function.function(model.__call__) concrete_func = run_model.get_concrete_function( tensor_spec.TensorSpec((10, 10, 10), dtype=dtypes.float32)) # Convert model. 
converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func]) converter.experimental_enable_mlir_converter = True tflite_model = converter.convert() # Check values from converted model. expected_value = concrete_func(input_data) actual_value = self._evaluateTFLiteModel(tflite_model, [input_data]) for expected, actual in zip(expected_value, actual_value): np.testing.assert_almost_equal(expected, actual) class TestFlexMode(test_util.TensorFlowTestCase): def testSession(self): with ops.Graph().as_default(): in_tensor = array_ops.placeholder( shape=[1, 16, 16, 3], dtype=dtypes.float32) out_tensor = in_tensor + in_tensor sess = session.Session() # Convert model and ensure model is not None. converter = lite.TFLiteConverter.from_session(sess, [in_tensor], [out_tensor]) converter.experimental_enable_mlir_converter = True converter.target_spec.supported_ops = set([lite.OpsSet.SELECT_TF_OPS]) tflite_model = converter.convert() # Ensures the model contains TensorFlow ops. # TODO(nupurgarg): Check values once there is a Python delegate interface. interpreter = Interpreter(model_content=tflite_model) with self.assertRaises(RuntimeError) as error: interpreter.allocate_tensors() self.assertIn( 'Regular TensorFlow ops are not supported by this interpreter.', str(error.exception)) @test_util.run_v2_only def testConcreteFunc(self): input_data = constant_op.constant(1., shape=[1]) root = tracking.AutoTrackable() root.v1 = variables.Variable(3.) root.v2 = variables.Variable(2.) root.f = def_function.function(lambda x: root.v1 * root.v2 * x) concrete_func = root.f.get_concrete_function(input_data) # Convert model. converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func]) converter.experimental_enable_mlir_converter = True converter.target_spec.supported_ops = set([lite.OpsSet.SELECT_TF_OPS]) tflite_model = converter.convert() # Ensures the model contains TensorFlow ops. # TODO(nupurgarg): Check values once there is a Python delegate interface. interpreter = Interpreter(model_content=tflite_model) with self.assertRaises(RuntimeError) as error: interpreter.allocate_tensors() self.assertIn( 'Regular TensorFlow ops are not supported by this interpreter.', str(error.exception)) if __name__ == '__main__': test.main()
apache-2.0
5,968,847,766,658,398,000
38.909091
80
0.674903
false
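The tests above all exercise the same conversion path: build a TF1 graph in a session, convert it with the MLIR-enabled converter, then inspect the result through the interpreter. A minimal sketch of that pattern, using the public tf.compat.v1 / tf.lite API rather than the internal test modules (an assumption on my part), and assuming a TensorFlow release that still ships the session-based converter and the experimental MLIR flag:

import tensorflow as tf

# Build a tiny TF1-style graph; mirrors the placeholder/add pattern in the tests.
with tf.Graph().as_default():
    inp = tf.compat.v1.placeholder(shape=[1, 4], dtype=tf.float32, name='input')
    out = inp + inp
    sess = tf.compat.v1.Session()

converter = tf.compat.v1.lite.TFLiteConverter.from_session(sess, [inp], [out])
converter.experimental_enable_mlir_converter = True  # the flag under test above
tflite_model = converter.convert()

# Inspect the converted model the same way the assertions do.
interpreter = tf.lite.Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
print(interpreter.get_input_details()[0]['shape'])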
KnightHawk3/Hummingbird
hummingbird/__init__.py
1
7490
import json

import requests

from hummingbird.objects import *

__version__ = '0.0.4'


class Hummingbird(object):
    """Wrapper object for the Hummingbird API v1."""
    headers = {'content-type': 'application/json'}
    auth_token = ''
    api_url = "http://hummingbird.me/api/v1"

    def __init__(self, username, password):
        """Sets up the API, tests if your auth is valid.

        :param str username: Hummingbird username.
        :param str password: Hummingbird password.
        :returns: None
        :raises: ValueError -- If the Authentication is wrong

        """
        self.auth_token = self.authenticate(username, password)

    def _query_(self, path, method, params={}):
        """Used internally for requests.

        :param str path: The path to hit.
        :param str method: The method to use, either `'GET'` or `'POST'`.
        :param dict params: The optional parameters for the `GET` or the data
            to `POST`.

        :returns: Requests object -- Requires you to handle the status codes
            yourself.

        """
        if method == "POST":
            url = '{API_URL}{API_PATH}'.format(API_URL=self.api_url,
                                               API_PATH=path)
            r = requests.post(url, data=json.dumps(params),
                              headers=self.headers)
            return r
        elif method == "GET":
            url = '{API_URL}{API_PATH}'.format(API_URL=self.api_url,
                                               API_PATH=path)
            r = requests.get(url, params=params, headers=self.headers)
            return r

    def authenticate(self, username, password):
        """Authenticates your user and returns an auth token.

        :param str username: Hummingbird username.
        :param str password: Hummingbird password.
        :returns: str -- The Auth Token
        :raises: ValueError -- If the Authentication is wrong

        """
        r = self._query_('/users/authenticate', 'POST',
                         params={'username': username, 'password': password})
        if r.status_code == 201:
            return r.text.strip('"')
        else:
            raise ValueError('Authentication invalid.')

    def get_anime(self, anime_id, title_language='canonical'):
        """Fetches the Anime Object of the given id or slug.

        :param anime_id: The Anime ID or Slug.
        :type anime_id: int or str
        :param str title_language: The PREFERRED title language; can be any of
            `'canonical'`, `'english'`, `'romanized'`.

        :returns: Anime Object -- The Anime you requested.

        """
        r = self._query_('/anime/%s' % anime_id, 'GET',
                         params={'title_language_preference': title_language})
        return Anime(r.json())

    def search_anime(self, query):
        """Fuzzy searches the Anime Database for the query.

        :param str query: The text to fuzzy search.
        :returns: List of Anime Objects. This list can be empty.

        """
        r = self._query_('/search/anime', 'GET', params={'query': query})
        results = [Anime(item) for item in r.json()]
        return results

    def get_library(self, username, status=None):
        """Fetches a user's library.

        :param str username: The user to get the library from.
        :param str status: Only return the items with the supplied status.
            Can be one of `currently-watching`, `plan-to-watch`, `completed`,
            `on-hold` or `dropped`.

        :returns: List of Library objects.

        """
        r = self._query_('/users/%s/library' % username, 'GET',
                         params={'status': status})
        results = [LibraryEntry(item) for item in r.json()]
        return results

    def get_user(self, username):
        """Get user information.

        :param str username: User to get info on.
        """
        r = self._query_('/users/%s' % username, 'GET')
        result = User(r.json())
        return result

    def get_feed(self, username):
        """Gets a user's feed.

        :param str username: User to fetch feed from.
        """
        r = self._query_('/users/%s/feed' % username, 'GET')
        results = [Story(item) for item in r.json()]
        return results

    def get_favorites(self, username):
        """Get a user's favorite anime.

        :param str username: User to get favorites from.
        """
        r = self._query_('/users/%s/favorite_anime' % username, 'GET')
        results = [Favorite(item) for item in r.json()]
        return results

    def update_entry(self, anime_id, status=None, privacy=None, rating=None,
                     sane_rating_update=None, rewatched_times=None, notes=None,
                     episodes_watched=None, increment_episodes=None):
        """Creates or updates the Library entry with the provided values.

        :param anime_id: The Anime ID or Slug.
        :type anime_id: int or str
        :param str auth_token: User authentication token.
        :param str status: Can be one of `'currently-watching'`,
            `'plan-to-watch'`, `'completed'`, `'on-hold'`, `'dropped'`.
        :param str privacy: Can be one of `'public'`, `'private'`. Making an
            entry private will hide it from public view.
        :param rating: Can be one of `0`, `0.5`, `1`, `1.5`, `2`, `2.5`, `3`,
            `3.5`, `4`, `4.5`, `5`. Setting it to the current value or 0 will
            remove the rating.
        :type rating: str, int or float
        :param sane_rating_update: Can be any one of the values for rating.
            Setting it to 0 will remove the rating. This should be used
            instead of rating if you don't want to unset the rating when
            setting it to its current value.
        :type sane_rating_update: str, int or float
        :param int rewatched_times: Number of rewatches. Can be 0 or above.
        :param str notes: The personal notes for the entry.
        :param int episodes_watched: Number of watched episodes. Can be
            between 0 and the total number of episodes. If equal to total
            number of episodes, status should be set to completed.
        :param bool increment_episodes: If set to true, increments watched
            episodes by one. If used along with episodes_watched, provided
            value will be incremented.
        :raises: ValueError -- if Authentication Token is invalid (it
            shouldn't be), or if there is a `500 Internal Server Error` or if
            the response is `Invalid JSON Object`.

        """
        r = self._query_('/libraries/%s' % anime_id, 'POST', {
            'auth_token': self.auth_token,
            'status': status,
            'privacy': privacy,
            'rating': rating,
            'sane_rating_update': sane_rating_update,
            'rewatched_times': rewatched_times,
            'notes': notes,
            'episodes_watched': episodes_watched,
            'increment_episodes': increment_episodes})
        if not (r.status_code == 200 or r.status_code == 201):
            raise ValueError

    def remove_entry(self, anime_id):
        """Removes an entry from the user's library.

        :param anime_id: The Anime ID or slug.
        """
        r = self._query_('/libraries/%s/remove' % anime_id, 'POST')
        if not r.status_code == 200:
            raise ValueError
mit
8,201,267,218,918,231,000
35.359223
80
0.568758
false
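Everything in the wrapper above goes through a Hummingbird instance and the auth token it stores. A short usage sketch; the credentials, slug and episode count are placeholders, and the hummingbird.me v1 endpoint it targets may no longer be reachable:

from hummingbird import Hummingbird

api = Hummingbird('some_user', 'some_password')   # authenticates, stores the auth token
matches = api.search_anime('titan')               # fuzzy search, may return an empty list
library = api.get_library('some_user', status='currently-watching')
api.update_entry('attack-on-titan', episodes_watched=5, increment_episodes=True)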
ECP-CANDLE/Benchmarks
Pilot1/UnoMT/networks/structures/residual_block.py
1
1441
""" File Name: UnoPytorch/residual_block.py Author: Xiaotian Duan (xduan7) Email: [email protected] Date: 8/13/18 Python Version: 3.6.6 File Description: """ import torch.nn as nn from networks.initialization.weight_init import basic_weight_init class ResBlock(nn.Module): def __init__(self, layer_dim: int, num_layers: int, dropout: float): super(ResBlock, self).__init__() # Layer construction ################################################## self.block = nn.Sequential() for i in range(num_layers): self.block.add_module('res_dense_%d' % i, nn.Linear(layer_dim, layer_dim)) if dropout > 0.: self.block.add_module('res_dropout_%d' % i, nn.Dropout(dropout)) if i != (num_layers - 1): self.block.add_module('res_relu_%d' % i, nn.ReLU()) self.activation = nn.ReLU() # Weight Initialization ############################################### self.block.apply(basic_weight_init) def forward(self, x): return self.activation(self.block(x) + x) if __name__ == '__main__': res_block = ResBlock( layer_dim=200, num_layers=2, dropout=0.2) print(res_block)
mit
7,751,717,003,790,064,000
25.685185
79
0.469813
false
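Because forward() returns relu(block(x) + x), the block only accepts inputs whose last dimension equals layer_dim and it preserves that shape. A small sketch, assuming PyTorch and that ResBlock is importable as defined above:

import torch

block = ResBlock(layer_dim=200, num_layers=2, dropout=0.2)
x = torch.randn(16, 200)       # batch of 16 feature vectors
y = block(x)
assert y.shape == x.shape      # the residual connection keeps the shape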
msunardi/blog
Control/sim_and_plot_pygame.py
4
10489
''' Copyright (C) 2015 Travis DeWolf This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. ''' import numpy as np import pygame import pygame.locals import sys class ArmPart: """ A class for storing relevant arm segment information. """ def __init__(self, pic, scale=1.0): self.base = pygame.image.load(pic) self.offset = self.base.get_rect()[2] / 2. * scale def rotate(self, rotation): """ Rotates and re-centers the arm segment. """ self.rotation = rotation # rotate our image image = pygame.transform.rotozoom(self.base, np.degrees(rotation), 1) # reset the center rect = image.get_rect() rect.center = np.zeros(2) return image, rect def transform(rect, base, arm_part): rect.center += np.asarray(base) rect.center += np.array([np.cos(arm_part.rotation) * arm_part.offset, -np.sin(arm_part.rotation) * arm_part.offset]) def transform_lines(rect, base, arm_part): transform(rect, base, arm_part) rect.center += np.array([-rect.width / 2.0, -rect.height / 2.0]) class Runner: """ A class for drawing the arm simulation using PyGame """ def __init__(self, title='', dt=1e-4, control_steps=10, display_steps=100, t_target=1.0, box=[-1,1,-1,1], rotate=0.0, control_type='', trajectory=None, infinite_trail=False, mouse_control=False): self.dt = dt self.control_steps = control_steps self.display_steps = display_steps self.target_steps = int(t_target/float(dt*display_steps)) self.trajectory = trajectory self.box = box self.control_type = control_type self.infinite_trail = infinite_trail self.mouse_control = mouse_control self.rotate = rotate self.title = title self.sim_step = 0 self.trail_index = -1 self.pen_lifted = False self.width = 642 self.height = 600 self.base_offset = np.array([self.width / 2.0, self.height*.9]) def run(self, arm, control_shell, video=None, video_time=None): self.arm = arm self.shell = control_shell # load arm images arm1 = ArmPart('img/three_link/svgupperarm2.png', scale = .7) arm2 = ArmPart('img/three_link/svgforearm2.png', scale = .8) arm3 = ArmPart('img/three_link/svghand2.png', scale= 1) scaling_term = np.ones(2) * 105 upperarm_length = self.arm.L[0] * scaling_term[0] forearm_length = self.arm.L[1] * scaling_term[0] hand_length = self.arm.L[2] * scaling_term[0] line_width = .15 * scaling_term[0] # create transparent arm lines line_upperarm_base = pygame.Surface((upperarm_length, line_width), pygame.SRCALPHA, 32) line_forearm_base = pygame.Surface((forearm_length, line_width), pygame.SRCALPHA, 32) line_hand_base = pygame.Surface((hand_length, line_width), pygame.SRCALPHA, 32) white = (255, 255, 255) red = (255, 0, 0) black = (0, 0, 0) arm_color = (75, 75, 75) line_color = (50, 50, 50, 200) # fourth value is transparency # color in transparent arm lines line_upperarm_base.fill(line_color) line_forearm_base.fill(line_color) line_hand_base.fill(line_color) fps = 20 # frames per second fpsClock = pygame.time.Clock() # constants for magnify plotting magnify_scale = 1.75 magnify_window_size = np.array([200, 200]) first_target = np.array([321, 
330]) magnify_offset = first_target * magnify_scale - magnify_window_size / 2 # setup pen trail and appending functions self.trail_data = [] def pen_down1(): self.pen_lifted = False x,y = self.arm.position() x = int( x[-1] * scaling_term[0] + self.base_offset[0]) y = int(-y[-1] * scaling_term[1] + self.base_offset[1]) self.trail_data.append([[x,y],[x,y]]) self.trail_data[self.trail_index].append(points[3]) self.pen_down = pen_down2 def pen_down2(): self.trail_data[self.trail_index].append(points[3]) pygame.init() self.display = pygame.display.set_mode((self.width, self.height)) pygame.display.set_caption(self.title) background = pygame.image.load('img/whiteboard.jpg') # enter simulation / plotting loop while True: self.display.fill(white) self.target = self.shell.controller.target * np.array([1, -1]) * \ scaling_term + self.base_offset # before drawing for j in range(self.display_steps): # update control signal if self.sim_step % self.control_steps == 0 or \ 'tau' not in locals(): tau = self.shell.control(self.arm) # apply control signal and simulate self.arm.apply_torque(u=tau, dt=self.dt) self.sim_step +=1 # get (x,y) positions of the joints x,y = self.arm.position() points = [(int(a * scaling_term[0] + self.base_offset[0]), int(-b * scaling_term[1] + self.base_offset[1])) for a,b in zip(x,y)] arm1_image, arm1_rect = arm1.rotate(self.arm.q[0]) arm2_image, arm2_rect = arm2.rotate(self.arm.q[1] + arm1.rotation) arm3_image, arm3_rect = arm3.rotate(self.arm.q[2] + arm2.rotation) # recenter the image locations appropriately transform(arm1_rect, points[0], arm1) transform(arm2_rect, points[1], arm2) transform(arm3_rect, points[2], arm3) arm3_rect.center += np.array([np.cos(arm3.rotation), -np.sin(arm3.rotation)]) * -10 # transparent upperarm line line_upperarm = pygame.transform.rotozoom(line_upperarm_base, np.degrees(arm1.rotation), 1) rect_upperarm = line_upperarm.get_rect() transform_lines(rect_upperarm, points[0], arm1) # transparent forearm line line_forearm = pygame.transform.rotozoom(line_forearm_base, np.degrees(arm2.rotation), 1) rect_forearm = line_forearm.get_rect() transform_lines(rect_forearm, points[1], arm2) # transparent hand line line_hand = pygame.transform.rotozoom(line_hand_base, np.degrees(arm3.rotation), 1) rect_hand = line_hand.get_rect() transform_lines(rect_hand, points[2], arm3) # update trail if self.shell.pen_down is True: self.pen_down() elif self.shell.pen_down is False and self.pen_lifted is False: self.pen_down = pen_down1 self.pen_lifted = True self.trail_index += 1 # draw things! 
self.display.blit(background, (0,0)) # draw on the background for trail in self.trail_data: pygame.draw.aalines(self.display, black, False, trail, True) # draw arm images self.display.blit(arm1_image, arm1_rect) self.display.blit(arm2_image, arm2_rect) self.display.blit(arm3_image, arm3_rect) # draw original arm lines # pygame.draw.lines(self.display, arm_color, False, points, 18) # draw transparent arm lines self.display.blit(line_upperarm, rect_upperarm) self.display.blit(line_forearm, rect_forearm) self.display.blit(line_hand, rect_hand) # draw circles at shoulder pygame.draw.circle(self.display, black, points[0], 30) pygame.draw.circle(self.display, arm_color, points[0], 12) # draw circles at elbow pygame.draw.circle(self.display, black, points[1], 20) pygame.draw.circle(self.display, arm_color, points[1], 7) # draw circles at wrist pygame.draw.circle(self.display, black, points[2], 15) pygame.draw.circle(self.display, arm_color, points[2], 5) # draw target pygame.draw.circle(self.display, red, [int(val) for val in self.target], 10) # now display magnification of drawing area magnify = pygame.Surface(magnify_window_size) magnify.blit(background, (-200,-200)) # draw on the background # magnify.fill(white) # put a border on it pygame.draw.rect(magnify, black, (2.5, 2.5, 195, 195), 1) # now we need to rescale the trajectory and targets # using the first target position, which I know to be the # desired center of the magnify area for trail in self.trail_data: pygame.draw.aalines(magnify, black, False, np.asarray(trail) * magnify_scale - magnify_offset, True) pygame.draw.circle(magnify, red, np.array(self.target * magnify_scale - magnify_offset, dtype=int), 5) # now draw the target and hand line self.display.blit(magnify, (32, 45)) # check for quit for event in pygame.event.get(): if event.type == pygame.locals.QUIT: pygame.quit() sys.exit() pygame.display.update() fpsClock.tick(fps) def show(self): try: plt.show() except AttributeError: pass
gpl-3.0
-5,241,659,287,703,154,000
37.141818
103
0.567452
false
beni55/catkin
cmake/test/download_checkmd5.py
2
5716
#!/usr/bin/env python

from __future__ import print_function
import os
import sys
try:
    from urllib.request import addinfourl, BaseHandler, build_opener, Request, URLError
except ImportError:
    from urllib2 import addinfourl, BaseHandler, build_opener, Request, URLError
import hashlib
from argparse import ArgumentParser

NAME = "download_checkmd5.py"


class HTTPRangeHandler(BaseHandler):

    def http_error_206(self, req, fp, code, msg, hdrs):
        r = addinfourl(fp, hdrs, req.get_full_url())
        r.code = code
        r.msg = msg
        return r

    def http_error_416(self, req, fp, code, msg, hdrs):
        raise URLError('Requested Range Not Satisfiable')


def download_with_resume(uri, dest):
    handler = HTTPRangeHandler()
    opener = build_opener(handler)

    offset = 0
    content_length = None
    accept_ranges = False
    while True:
        req = Request(uri)
        if offset:
            req.add_header('Range', 'bytes=%d-' % offset)
        src_file = None
        try:
            src_file = opener.open(req)
            headers = src_file.info()
            if not offset:
                # on first connection check server capabilities
                if 'Content-Length' in headers:
                    content_length = int(headers['Content-Length'])
                if 'Accept-Ranges' in headers:
                    accept_ranges = headers['Accept-Ranges'] != 'none'
            else:
                # on resume verify that server understood range header and responded accordingly
                if 'Content-Range' not in headers:
                    raise IOError('Download aborted and server does not support resuming download')
                if int(headers['Content-Range'][len('bytes '):].split('-')[0]) != offset:
                    raise IOError('Download aborted because server replied with different content range than requested')
                sys.stdout.write(' resume from %d...' % offset)
                sys.stdout.flush()
            with open(dest, 'ab' if offset else 'wb') as dst_file:
                progress = False
                while True:
                    data = src_file.read(8192)
                    if not data:
                        break
                    progress = True
                    dst_file.write(data)
                    offset += len(data)
            if not progress:
                # if no bytes have been received abort download
                raise IOError("No progress when trying to download '%s'" % uri)
        except:
            if src_file:
                src_file.close()
            raise
        # when content length is unknown it is assumed that the download is complete
        if content_length is None:
            break
        # or when enough data has been downloaded (> is especially a valid case)
        if offset >= content_length:
            break
        if not accept_ranges:
            raise IOError('Server does not accept ranges to resume download')


def download_md5(uri, dest):
    """
    downloads file from uri to file dest
    """
    # Create intermediate directories as necessary, #2970
    dirname = os.path.dirname(dest)
    if len(dirname) and not os.path.exists(dirname):
        os.makedirs(dirname)
    sys.stdout.write('Downloading %s to %s...' % (uri, dest))
    sys.stdout.flush()
    try:
        download_with_resume(uri, dest)
        sys.stdout.write(' done.\n')
    except Exception as e:
        # delete partially downloaded data
        if os.path.exists(dest):
            os.unlink(dest)
        sys.stdout.write(' failed (%s)!\n' % e)
        raise


def checkmd5(dest, md5sum=None):
    """
    checks file at dest against md5.

    :returns (boolean, hexdigest): True if dest contents matches md5sum
    """
    if not os.path.exists(dest):
        return False, 'null'
    with open(dest, 'rb') as f:
        md5value = hashlib.md5()
        while True:
            buf = f.read(4096)
            if not buf:
                break
            md5value.update(buf)
    hexdigest = md5value.hexdigest()

    print('Checking md5sum on %s' % (dest))
    return hexdigest == md5sum, hexdigest


def main(argv=sys.argv[1:]):
    """
    Downloads URI to file dest and checks md5 if given.
    """
    parser = ArgumentParser(description='Downloads URI to file dest. If md5sum is given, checks md5sum. If the file exists but does not match, downloads and checks again')
    parser.add_argument('uri')
    parser.add_argument('dest')
    parser.add_argument('md5sum', nargs='?')
    parser.add_argument('--ignore-error', action='store_true', help='Ignore download errors')
    args = parser.parse_args(argv)

    uri = args.uri
    if '://' not in uri:
        uri = 'file://' + uri

    fresh = False
    if not os.path.exists(args.dest):
        try:
            download_md5(uri, args.dest)
        except Exception:
            if args.ignore_error:
                return 0
            raise
        fresh = True

    if args.md5sum:
        result, hexdigest = checkmd5(args.dest, args.md5sum)
        if result is False and fresh is False:
            print('WARNING: md5sum mismatch (%s != %s); re-downloading file %s' % (hexdigest, args.md5sum, args.dest))
            os.remove(args.dest)
            try:
                download_md5(uri, args.dest)
            except Exception:
                if args.ignore_error:
                    return 0
                raise
            result, hexdigest = checkmd5(args.dest, args.md5sum)
        if result is False:
            return 'ERROR: md5sum mismatch (%s != %s) on %s; aborting' % (hexdigest, args.md5sum, args.dest)

    return 0


if __name__ == '__main__':
    sys.exit(main())
bsd-3-clause
4,726,529,626,388,738,000
32.623529
161
0.572778
false
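The script is driven by argparse (positional uri, dest, optional md5sum, plus --ignore-error), and main() can also be invoked programmatically. A sketch with placeholder values for the URL, destination and checksum, assuming the script is importable as download_checkmd5:

from download_checkmd5 import main

rc = main(['http://example.com/data.tar.gz',     # placeholder URI
           '/tmp/data.tar.gz',                   # placeholder destination
           'd41d8cd98f00b204e9800998ecf8427e',   # placeholder md5sum
           '--ignore-error'])
print(rc)  # 0 on success (or ignored error), an error string on md5 mismatch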
chrisidefix/devide.johannes
extra/soappy-cvp/SOAPpy/wstools/XMLname.py
15
2529
"""Translate strings to and from SOAP 1.2 XML name encoding Implements rules for mapping application defined name to XML names specified by the w3 SOAP working group for SOAP version 1.2 in Appendix A of "SOAP Version 1.2 Part 2: Adjuncts", W3C Working Draft 17, December 2001, <http://www.w3.org/TR/soap12-part2/#namemap> Also see <http://www.w3.org/2000/xp/Group/xmlp-issues>. Author: Gregory R. Warnes <[email protected]> Date:: 2002-04-25 Version 0.9.0 """ ident = "$Id: XMLname.py,v 1.4 2005/02/16 14:45:37 warnes Exp $" from re import * def _NCNameChar(x): return x.isalpha() or x.isdigit() or x=="." or x=='-' or x=="_" def _NCNameStartChar(x): return x.isalpha() or x=="_" def _toUnicodeHex(x): hexval = hex(ord(x[0]))[2:] hexlen = len(hexval) # Make hexval have either 4 or 8 digits by prepending 0's if (hexlen==1): hexval = "000" + hexval elif (hexlen==2): hexval = "00" + hexval elif (hexlen==3): hexval = "0" + hexval elif (hexlen==4): hexval = "" + hexval elif (hexlen==5): hexval = "000" + hexval elif (hexlen==6): hexval = "00" + hexval elif (hexlen==7): hexval = "0" + hexval elif (hexlen==8): hexval = "" + hexval else: raise Exception, "Illegal Value returned from hex(ord(x))" return "_x"+ hexval + "_" def _fromUnicodeHex(x): return eval( r'u"\u'+x[2:-1]+'"' ) def toXMLname(string): """Convert string to a XML name.""" if string.find(':') != -1 : (prefix, localname) = string.split(':',1) else: prefix = None localname = string T = unicode(localname) N = len(localname) X = []; for i in range(N) : if i< N-1 and T[i]==u'_' and T[i+1]==u'x': X.append(u'_x005F_') elif i==0 and N >= 3 and \ ( T[0]==u'x' or T[0]==u'X' ) and \ ( T[1]==u'm' or T[1]==u'M' ) and \ ( T[2]==u'l' or T[2]==u'L' ): X.append(u'_xFFFF_' + T[0]) elif (not _NCNameChar(T[i])) or (i==0 and not _NCNameStartChar(T[i])): X.append(_toUnicodeHex(T[i])) else: X.append(T[i]) if prefix: return "%s:%s" % (prefix, u''.join(X)) return u''.join(X) def fromXMLname(string): """Convert XML name to unicode string.""" retval = sub(r'_xFFFF_','', string ) def fun( matchobj ): return _fromUnicodeHex( matchobj.group(0) ) retval = sub(r'_x[0-9A-Za-z]+_', fun, retval ) return retval
bsd-3-clause
5,062,211,323,455,546,000
27.1
78
0.553183
false
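A round-trip sketch of the two public helpers (Python 2, like the module itself). Following the escaping rules above, a space (0x20) should be encoded as _x0020_ and decoded back by fromXMLname:

# Assumes this module is importable as XMLname.
from XMLname import toXMLname, fromXMLname

encoded = toXMLname(u'order total')   # space is not an NCName char -> u'order_x0020_total'
decoded = fromXMLname(encoded)        # -> u'order total'
print encoded, decoded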
mottosso/maya-multicapture
multicapture.py
1
4677
import os import sys import shutil import tempfile import contextlib from PySide import QtGui from maya import ( cmds, OpenMaya as api, OpenMayaUI as apiUI ) class Window(QtGui.QDialog): def __init__(self, filename, parent=None): super(Window, self).__init__(parent) panel1 = QtGui.QLabel() panel2 = QtGui.QLabel() panel3 = QtGui.QLabel() panel4 = QtGui.QLabel() layout = QtGui.QGridLayout(self) layout.setContentsMargins(0, 0, 0, 0) layout.setSpacing(5) layout.addWidget(panel1, 0, 0) layout.addWidget(panel2, 0, 1) layout.addWidget(panel3, 1, 0) layout.addWidget(panel4, 1, 1) # Public members self.is_running = False self.filename = filename self.tempdir = tempfile.mkdtemp() self.panel1 = panel1 self.panel2 = panel2 self.panel3 = panel3 self.panel4 = panel4 QtGui.QShortcut(QtGui.QKeySequence("Escape"), self, self.stop) self.update() def snap(self): """Write four viewports to disk Returns: (list) paths to resulting 4 files """ images = list() for index in range(4): # Panels start at index 1 index += 1 cmds.setFocus("modelPanel%i" % index) # Force a refresh, otherwise the setting # of the focus might not have time to come # into effect, resulting in duplicate # captures from the below. cmds.refresh() view = apiUI.M3dView.active3dView() image = api.MImage() view.readColorBuffer( image, True # BGRA -> RGBA ) fname = os.path.join(self.tempdir, "temp_%i.jpg" % index) image.writeToFile(fname, "jpg") images.append(fname) return images def start(self): """Initiate capture""" self.is_running = True start_frame = int(cmds.playbackOptions(minTime=True, query=True)) end_frame = int(cmds.playbackOptions(maxTime=True, query=True)) for frame in range(start_frame, end_frame, 1): if not self.is_running: break cmds.currentTime(frame) self.update() self.save(frame) # Close once finished self.close() def stop(self): print("Stopping..") self.is_running = False def update(self): images = self.snap() for i, pane in enumerate([self.panel1, self.panel2, self.panel3, self.panel4]): pixmap = QtGui.QPixmap(images[i]) pane.setPixmap(pixmap) def save(self, frame): """Save the current frame to disk Arguments: frame (int): Number of current frame """ fname = self.filename % frame pixmap = QtGui.QPixmap.grabWidget(self) # Force a refresh of the file on disk. # Otherwise, it may pick up an older # version due to filesystem caching. try: os.remove(fname) except OSError: pass if pixmap.save(fname): print("Wrote %s" % fname) else: print("Could not write %s" % fname) self.is_running = False def closeEvent(self, event): try: shutil.rmtree(self.tempdir) except OSError: print("Had some problems cleaning up @ %s" % self.tempdir) super(Window, self).closeEvent(event) @contextlib.contextmanager def no_undo(): undo_is_on = cmds.undoInfo(state=True, query=True) if undo_is_on: try: cmds.undoInfo(state=False) yield finally: cmds.undoInfo(state=True) else: yield def capture(): """Initiate capture""" try: parent = next(w for w in QtGui.qApp.topLevelWidgets() if w.objectName() == "MayaWindow") except StopIteration: print("Main Maya window not found") parent = None scene = os.path.basename( cmds.file(sceneName=True, query=True) or "unsaved") name, _ = os.path.splitext(scene) workspace = cmds.workspace(fullName=True) fname = os.path.join(workspace, "images", name, "{}.%04d.jpg".format(name)) if not os.path.exists(os.path.dirname(fname)): os.makedirs(os.path.dirname(fname)) window = Window(fname, parent=parent) window.show() with no_undo(): window.start()
mit
3,182,195,300,037,787,600
24.557377
79
0.548856
false
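capture() is meant to be run inside a Maya session (it needs maya.cmds and the Qt main window), with frames written under the current workspace's images directory. A minimal invocation sketch for the Script Editor:

import multicapture

multicapture.capture()   # press Escape in the capture window to stop early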
Lektorium-LLC/edx-platform
cms/djangoapps/contentstore/views/certificates.py
6
22682
""" Certificates Data Model: course.certificates: { 'certificates': [ { 'version': 1, // data contract version 'id': 12345, // autogenerated identifier 'name': 'Certificate 1', 'description': 'Certificate 1 Description', 'course_title': 'course title', 'signatories': [ { 'id': 24680, // autogenerated identifier 'name': 'Dr. Bob Smith', 'title': 'Dean of the College', 'organization': 'Awesome College' } ] } ] } """ import json import logging from django.conf import settings from django.contrib.auth.decorators import login_required from django.core.exceptions import PermissionDenied from django.http import HttpResponse from django.utils.translation import ugettext as _ from django.views.decorators.csrf import ensure_csrf_cookie from django.views.decorators.http import require_http_methods from opaque_keys import InvalidKeyError from opaque_keys.edx.keys import AssetKey, CourseKey from contentstore.utils import get_lms_link_for_certificate_web_view, reverse_course_url from contentstore.views.assets import delete_asset from contentstore.views.exception import AssetNotFoundException from course_modes.models import CourseMode from edxmako.shortcuts import render_to_response from eventtracking import tracker from student.auth import has_studio_write_access from student.roles import GlobalStaff from util.db import MYSQL_MAX_INT, generate_int_id from util.json_request import JsonResponse from xmodule.modulestore import EdxJSONEncoder from xmodule.modulestore.django import modulestore CERTIFICATE_SCHEMA_VERSION = 1 CERTIFICATE_MINIMUM_ID = 100 LOGGER = logging.getLogger(__name__) def _get_course_and_check_access(course_key, user, depth=0): """ Internal method used to calculate and return the locator and course module for the view functions in this file. """ if not has_studio_write_access(user, course_key): raise PermissionDenied() course_module = modulestore().get_course(course_key, depth=depth) return course_module def _delete_asset(course_key, asset_key_string): """ Internal method used to create asset key from string and remove asset by calling delete_asset method of assets module. """ if asset_key_string: try: asset_key = AssetKey.from_string(asset_key_string) except InvalidKeyError: # remove first slash in asset path # otherwise it generates InvalidKeyError in case of split modulestore if '/' == asset_key_string[0]: asset_key_string = asset_key_string[1:] try: asset_key = AssetKey.from_string(asset_key_string) except InvalidKeyError: # Unable to parse the asset key, log and return LOGGER.info( "In course %r, unable to parse asset key %r, not attempting to delete signatory.", course_key, asset_key_string, ) return else: # Unable to parse the asset key, log and return LOGGER.info( "In course %r, unable to parse asset key %r, not attempting to delete signatory.", course_key, asset_key_string, ) return try: delete_asset(course_key, asset_key) # If the asset was not found, it doesn't have to be deleted... except AssetNotFoundException: pass # Certificates Exceptions class CertificateException(Exception): """ Base exception for Certificates workflows """ pass class CertificateValidationError(CertificateException): """ An exception raised when certificate information is invalid. 
""" pass class CertificateManager(object): """ The CertificateManager is responsible for storage, retrieval, and manipulation of Certificates Certificates are not stored in the Django ORM, they are a field/setting on the course descriptor """ @staticmethod def parse(json_string): """ Deserialize the provided JSON data into a standard Python object """ try: certificate = json.loads(json_string) except ValueError: raise CertificateValidationError(_("invalid JSON")) # Include the data contract version certificate["version"] = CERTIFICATE_SCHEMA_VERSION # Ensure a signatories list is always returned if certificate.get("signatories") is None: certificate["signatories"] = [] certificate["editing"] = False return certificate @staticmethod def validate(certificate_data): """ Ensure the certificate data contains all of the necessary fields and the values match our rules """ # Ensure the schema version meets our expectations if certificate_data.get("version") != CERTIFICATE_SCHEMA_VERSION: raise TypeError( "Unsupported certificate schema version: {0}. Expected version: {1}.".format( certificate_data.get("version"), CERTIFICATE_SCHEMA_VERSION ) ) if not certificate_data.get("name"): raise CertificateValidationError(_("must have name of the certificate")) @staticmethod def get_used_ids(course): """ Return a list of certificate identifiers that are already in use for this course """ if not course.certificates or not course.certificates.get('certificates'): return [] return [cert['id'] for cert in course.certificates['certificates']] @staticmethod def assign_id(course, certificate_data, certificate_id=None): """ Assign an identifier to the provided certificate data. If the caller did not provide an identifier, we autogenerate a unique one for them In addition, we check the certificate's signatories and ensure they also have unique ids """ used_ids = CertificateManager.get_used_ids(course) if certificate_id: certificate_data['id'] = int(certificate_id) else: certificate_data['id'] = generate_int_id( CERTIFICATE_MINIMUM_ID, MYSQL_MAX_INT, used_ids ) for index, signatory in enumerate(certificate_data['signatories']): # pylint: disable=unused-variable if signatory and not signatory.get('id', False): signatory['id'] = generate_int_id(used_ids=used_ids) used_ids.append(signatory['id']) return certificate_data @staticmethod def serialize_certificate(certificate): """ Serialize the Certificate object's locally-stored certificate data to a JSON representation We use direct access here for specific keys in order to enforce their presence """ certificate_data = certificate.certificate_data certificate_response = { "id": certificate_data['id'], "name": certificate_data['name'], "description": certificate_data['description'], "is_active": certificate_data['is_active'], "version": CERTIFICATE_SCHEMA_VERSION, "signatories": certificate_data['signatories'] } # Some keys are not required, such as the title override... if certificate_data.get('course_title'): certificate_response["course_title"] = certificate_data['course_title'] return certificate_response @staticmethod def deserialize_certificate(course, value): """ Deserialize from a JSON representation into a Certificate object. 
'value' should be either a Certificate instance, or a valid JSON string """ # Ensure the schema fieldset meets our expectations for key in ("name", "description", "version"): if key not in value: raise CertificateValidationError(_("Certificate dict {0} missing value key '{1}'").format(value, key)) # Load up the Certificate data certificate_data = CertificateManager.parse(value) CertificateManager.validate(certificate_data) certificate_data = CertificateManager.assign_id(course, certificate_data, certificate_data.get('id', None)) certificate = Certificate(course, certificate_data) # Return a new Certificate object instance return certificate @staticmethod def get_certificates(course, only_active=False): """ Retrieve the certificates list from the provided course, if `only_active` is True it would skip inactive certificates. """ # The top-level course field is 'certificates', which contains various properties, # including the actual 'certificates' list that we're working with in this context certificates = course.certificates.get('certificates', []) if only_active: certificates = [certificate for certificate in certificates if certificate.get('is_active', False)] return certificates @staticmethod def remove_certificate(request, store, course, certificate_id): """ Remove certificate from the course """ for index, cert in enumerate(course.certificates['certificates']): if int(cert['id']) == int(certificate_id): certificate = course.certificates['certificates'][index] # Remove any signatory assets prior to dropping the entire cert record from the course for sig_index, signatory in enumerate(certificate.get('signatories')): # pylint: disable=unused-variable _delete_asset(course.id, signatory['signature_image_path']) # Now drop the certificate record course.certificates['certificates'].pop(index) store.update_item(course, request.user.id) break # pylint-disable: unused-variable @staticmethod def remove_signatory(request, store, course, certificate_id, signatory_id): """ Remove the specified signatory from the provided course certificate """ for cert_index, cert in enumerate(course.certificates['certificates']): # pylint: disable=unused-variable if int(cert['id']) == int(certificate_id): for sig_index, signatory in enumerate(cert.get('signatories')): if int(signatory_id) == int(signatory['id']): _delete_asset(course.id, signatory['signature_image_path']) del cert['signatories'][sig_index] store.update_item(course, request.user.id) break @staticmethod def track_event(event_name, event_data): """Track certificate configuration event. Arguments: event_name (str): Name of the event to be logged. event_data (dict): A Dictionary containing event data Returns: None """ event_name = '.'.join(['edx', 'certificate', 'configuration', event_name]) tracker.emit(event_name, event_data) class Certificate(object): """ The logical representation of an individual course certificate """ def __init__(self, course, certificate_data): """ Instantiate a Certificate object instance using the provided information. """ self.course = course self._certificate_data = certificate_data self.id = certificate_data['id'] # pylint: disable=invalid-name @property def certificate_data(self): """ Retrieve the locally-stored certificate data from the Certificate object via a helper method """ return self._certificate_data @login_required @require_http_methods(("POST",)) @ensure_csrf_cookie def certificate_activation_handler(request, course_key_string): """ A handler for Certificate Activation/Deactivation POST json: is_active. 
update the activation state of certificate """ course_key = CourseKey.from_string(course_key_string) store = modulestore() try: course = _get_course_and_check_access(course_key, request.user) except PermissionDenied: msg = _('PermissionDenied: Failed in authenticating {user}').format(user=request.user) return JsonResponse({"error": msg}, status=403) data = json.loads(request.body) is_active = data.get('is_active', False) certificates = CertificateManager.get_certificates(course) # for certificate activation/deactivation, we are assuming one certificate in certificates collection. for certificate in certificates: certificate['is_active'] = is_active break store.update_item(course, request.user.id) cert_event_type = 'activated' if is_active else 'deactivated' CertificateManager.track_event(cert_event_type, { 'course_id': unicode(course.id), }) return HttpResponse(status=200) @login_required @require_http_methods(("GET", "POST")) @ensure_csrf_cookie def certificates_list_handler(request, course_key_string): """ A RESTful handler for Course Certificates GET html: return Certificates list page (Backbone application) POST json: create new Certificate """ course_key = CourseKey.from_string(course_key_string) store = modulestore() with store.bulk_operations(course_key): try: course = _get_course_and_check_access(course_key, request.user) except PermissionDenied: msg = _('PermissionDenied: Failed in authenticating {user}').format(user=request.user) return JsonResponse({"error": msg}, status=403) if 'text/html' in request.META.get('HTTP_ACCEPT', 'text/html'): certificate_url = reverse_course_url('certificates.certificates_list_handler', course_key) course_outline_url = reverse_course_url('course_handler', course_key) upload_asset_url = reverse_course_url('assets_handler', course_key) activation_handler_url = reverse_course_url( handler_name='certificates.certificate_activation_handler', course_key=course_key ) course_modes = [ mode.slug for mode in CourseMode.modes_for_course( course_id=course.id, include_expired=True ) if mode.slug != 'audit' ] has_certificate_modes = len(course_modes) > 0 if has_certificate_modes: certificate_web_view_url = get_lms_link_for_certificate_web_view( user_id=request.user.id, course_key=course_key, mode=course_modes[0] # CourseMode.modes_for_course returns default mode if doesn't find anyone. ) else: certificate_web_view_url = None certificates = None is_active = False if settings.FEATURES.get('CERTIFICATES_HTML_VIEW', False): certificates = CertificateManager.get_certificates(course) # we are assuming only one certificate in certificates collection. 
for certificate in certificates: is_active = certificate.get('is_active', False) break return render_to_response('certificates.html', { 'context_course': course, 'certificate_url': certificate_url, 'course_outline_url': course_outline_url, 'upload_asset_url': upload_asset_url, 'certificates': certificates, 'has_certificate_modes': has_certificate_modes, 'course_modes': course_modes, 'certificate_web_view_url': certificate_web_view_url, 'is_active': is_active, 'is_global_staff': GlobalStaff().has_user(request.user), 'certificate_activation_handler_url': activation_handler_url }) elif "application/json" in request.META.get('HTTP_ACCEPT'): # Retrieve the list of certificates for the specified course if request.method == 'GET': certificates = CertificateManager.get_certificates(course) return JsonResponse(certificates, encoder=EdxJSONEncoder) elif request.method == 'POST': # Add a new certificate to the specified course try: new_certificate = CertificateManager.deserialize_certificate(course, request.body) except CertificateValidationError as err: return JsonResponse({"error": err.message}, status=400) if course.certificates.get('certificates') is None: course.certificates['certificates'] = [] course.certificates['certificates'].append(new_certificate.certificate_data) response = JsonResponse(CertificateManager.serialize_certificate(new_certificate), status=201) response["Location"] = reverse_course_url( 'certificates.certificates_detail_handler', course.id, kwargs={'certificate_id': new_certificate.id} ) store.update_item(course, request.user.id) CertificateManager.track_event('created', { 'course_id': unicode(course.id), 'configuration_id': new_certificate.id }) course = _get_course_and_check_access(course_key, request.user) return response else: return HttpResponse(status=406) @login_required @ensure_csrf_cookie @require_http_methods(("POST", "PUT", "DELETE")) def certificates_detail_handler(request, course_key_string, certificate_id): """ JSON API endpoint for manipulating a course certificate via its internal identifier. 
Utilized by the Backbone.js 'certificates' application model POST or PUT json: update the specified certificate based on provided information DELETE json: remove the specified certificate from the course """ course_key = CourseKey.from_string(course_key_string) course = _get_course_and_check_access(course_key, request.user) certificates_list = course.certificates.get('certificates', []) match_index = None match_cert = None for index, cert in enumerate(certificates_list): if certificate_id is not None: if int(cert['id']) == int(certificate_id): match_index = index match_cert = cert store = modulestore() if request.method in ('POST', 'PUT'): if certificate_id: active_certificates = CertificateManager.get_certificates(course, only_active=True) if int(certificate_id) in [int(certificate["id"]) for certificate in active_certificates]: # Only global staff (PMs) are able to edit active certificate configuration if not GlobalStaff().has_user(request.user): raise PermissionDenied() try: new_certificate = CertificateManager.deserialize_certificate(course, request.body) except CertificateValidationError as err: return JsonResponse({"error": err.message}, status=400) serialized_certificate = CertificateManager.serialize_certificate(new_certificate) cert_event_type = 'created' if match_cert: cert_event_type = 'modified' certificates_list[match_index] = serialized_certificate else: certificates_list.append(serialized_certificate) store.update_item(course, request.user.id) CertificateManager.track_event(cert_event_type, { 'course_id': unicode(course.id), 'configuration_id': serialized_certificate["id"] }) return JsonResponse(serialized_certificate, status=201) elif request.method == "DELETE": if not match_cert: return JsonResponse(status=404) active_certificates = CertificateManager.get_certificates(course, only_active=True) if int(certificate_id) in [int(certificate["id"]) for certificate in active_certificates]: # Only global staff (PMs) are able to delete active certificate configuration if not GlobalStaff().has_user(request.user): raise PermissionDenied() CertificateManager.remove_certificate( request=request, store=store, course=course, certificate_id=certificate_id ) CertificateManager.track_event('deleted', { 'course_id': unicode(course.id), 'configuration_id': certificate_id }) return JsonResponse(status=204) @login_required @ensure_csrf_cookie @require_http_methods(("POST", "PUT", "DELETE")) def signatory_detail_handler(request, course_key_string, certificate_id, signatory_id): """ JSON API endpoint for manipulating a specific course certificate signatory via its internal identifier. Utilized by the Backbone 'certificates' application. DELETE json: Remove the specified signatory from the specified certificate """ course_key = CourseKey.from_string(course_key_string) store = modulestore() with store.bulk_operations(course_key): course = _get_course_and_check_access(course_key, request.user) certificates_list = course.certificates['certificates'] match_cert = None # pylint: disable=unused-variable for index, cert in enumerate(certificates_list): if certificate_id is not None: if int(cert['id']) == int(certificate_id): match_cert = cert if request.method == "DELETE": if not match_cert: return JsonResponse(status=404) CertificateManager.remove_signatory( request=request, store=store, course=course, certificate_id=certificate_id, signatory_id=signatory_id ) return JsonResponse(status=204)
agpl-3.0
-6,564,033,490,755,635,000
39.721724
121
0.624724
false
iAMr00t/opencog
opencog/python/blending/src/connector/connect_conflict_interaction_information.py
15
20117
from opencog.atomspace import * from opencog.logger import log from opencog.statistics import \ PyDataProviderAtom, PyProbabilityAtom, \ PyEntropyAtom, PyInteractionInformationAtom from blending.src.connector.base_connector import BaseConnector from blending.src.connector.connect_util import * import blending.src.connector.equal_link_key as eq_link from blending.util.blending_config import BlendConfig from blending.util.blending_error import blending_status __author__ = 'DongMin Kim' class ConnectConflictInteractionInformation(BaseConnector): """Link connector that connecting to new blend by checking the value of interaction information. This connector estimates the link set is most valuable, when it has the highest interaction information value. 1. Find the duplicate links, and the non-duplicate links. 2. Find the conflict links, and non-conflict links from duplicate links. 3. Find the target links in nodes related with decided nodes(blend source). -> Get the related nodes by searching InheritanceLink. 4. Make 2^k available conflict link cases if there exists k conflicts. 5. Calculate probabilities with target links. -> Create the probabilities from example cases in whole AtomSpace. 6. Calculate interaction information for each available conflict link cases. 7. Choose one case which has largest interaction information and connect to new blend. Attributes: check_type: A link type to check conflict. strength_diff_limit: A limit of difference between links strength value. confidence_above_limit: A threshold of both links confidence value. data_n_gram_limit: A max value of n_gram during probability generation. -1 means infinite. evaluate_n_gram_limit: A max value of n_gram during interaction information generation. -1 means infinite. inter_info_strength_above_limit: A max value of strength in TruthValue during interaction information generation. :type check_type: opencog.type_constructors.types :type strength_diff_limit: float :type confidence_above_limit: float :type data_n_gram_limit: int :type evaluate_n_gram_limit: int :type inter_info_strength_above_limit: float """ # TODO: Currently, this class can handle # when the number of decided atom is only 2. def __init__(self, a): super(self.__class__, self).__init__(a) self.check_type = None self.strength_diff_limit = None self.confidence_above_limit = None self.data_n_gram_limit = None self.evaluate_n_gram_limit = None self.inter_info_strength_above_limit = None def make_default_config(self): """Initialize a default config for this class.""" super(self.__class__, self).make_default_config() BlendConfig().update(self.a, "connect-check-type", "SimilarityLink") BlendConfig().update(self.a, "connect-strength-diff-limit", "0.3") BlendConfig().update(self.a, "connect-confidence-above-limit", "0.7") BlendConfig().update(self.a, "connect-data-n-gram-limit", "None") BlendConfig().update(self.a, "connect-evaluate-n-gram-limit", "None") BlendConfig().update( self.a, "connect-inter-info-strength-above-limit", "0.5") def __prepare_blended_atoms(self, merged_atom): """Prepare new blend atoms list if the number of result atom is expected to more than one. Args: merged_atom: The atom to connect new links. :param merged_atom: Atom """ self.ret = [merged_atom] def __get_max_n_gram( self, conflict_link_cases, non_conflict_link_cases, non_duplicate_link_cases, related_node_target_links ): """Decide the max value of n_gram, from every category link set. 
MAX( (USER DEFINED LIMIT), length of (related_node_target_link), length of (conflict_link + non_conflict_link + non_duplicate_link) ) Args: conflict_link_cases: Conflicted link tuples list. non_conflict_link_cases: Non-conflict links list. non_duplicate_link_cases: Non-duplicated links list. related_node_target_links: Target link tuples in related node list. :param conflict_link_cases: list[list[EqualLinkKey]] :param non_conflict_link_cases: list[EqualLinkKey] :param non_duplicate_link_cases: list[EqualLinkKey] :param related_node_target_links: list[list[EqualLinkKey]] Returns: The max value of n_gram. :rtype : int """ conflict_link_n_gram = 0 \ if len(conflict_link_cases) == 0 \ else len(conflict_link_cases[0]) merged_link_n_gram = \ conflict_link_n_gram + \ len(non_conflict_link_cases) + \ len(non_duplicate_link_cases) target_n_gram = list(map(lambda x: len(x), related_node_target_links)) target_n_gram.append(merged_link_n_gram) n_gram = self.data_n_gram_limit \ if 0 < self.data_n_gram_limit < max(target_n_gram) \ else max(target_n_gram) if n_gram == self.data_n_gram_limit: log.info( "ConnectConflictInteractionInformation: " "n_gram was limited to: " + str(self.data_n_gram_limit) + ", original n_gram was: " + str(max(target_n_gram)) ) return n_gram def __make_statistics_data_provider( self, conflict_link_cases, non_conflict_link_cases, non_duplicate_link_cases, related_node_target_links ): """Make the statistics data provider. Args: conflict_link_cases: Conflicted link tuples list. non_conflict_link_cases: Non-conflict links list. non_duplicate_link_cases: Non-duplicated links list. related_node_target_links: Target link tuples in related node list. :param conflict_link_cases: list[list[EqualLinkKey]] :param non_conflict_link_cases: list[EqualLinkKey] :param non_duplicate_link_cases: list[EqualLinkKey] :param related_node_target_links: list[list[EqualLinkKey]] """ self.provider = PyDataProviderAtom( self.__get_max_n_gram( conflict_link_cases, non_conflict_link_cases, non_duplicate_link_cases, related_node_target_links ), # Must be non-sorted information. False ) def __generate_information_probability( self, related_node_target_links ): """Calculate probabilities with target links. It creates the entropy and probabilities from related nodes in whole AtomSpace. Args: related_node_target_links: Target link tuples in related node list :param related_node_target_links: list[list[EqualLinkKey]] Returns: The max value of n_gram. :rtype : int """ log.debug( "ConnectConflictInteractionInformation: Calculating probabilities " "(Total: " + str(len(related_node_target_links)) + ")" ) current_ratio = 0 # Register the every link in related nodes to provider. for i, target_equal_link in enumerate(related_node_target_links): current_ratio = self.__print_progress( "ConnectConflictInteractionInformation:PROB:", current_ratio, i, len(related_node_target_links), 30 ) # TODO: To prevent freeze during probability generation, # user can limit the max value of calculation. max_repeat_length = self.provider.n_gram \ if 0 < self.provider.n_gram < len(target_equal_link) \ else len(target_equal_link) # Make n-gram data in each related node. # The provider with high n-gram will provides more correct data, # but speed will going slower rapidly. # (0, 0, 0), (0, 0, 1), (0, 1, 0), (0, 1, 1), ... 
(1, 1, 1) cartesian_binary_iterator = \ itertools.product([False, True], repeat=max_repeat_length) for viable_case_binary in enumerate(cartesian_binary_iterator): gram_data = list() for j, selector in enumerate(viable_case_binary): # Make each gram data. if selector: gram_data.append(eq_link.key_to_link( self.a, target_equal_link[j], self.ret[0], target_equal_link[j].tv )) # Register the generated gram_data. self.provider.add_one_rawdata_count( [data for data in gram_data if data is not None], 1 ) # Update provider's statistic data. PyProbabilityAtom().calculate_probabilities(self.provider) PyEntropyAtom().calculate_entropies(self.provider) def __evaluate_interaction_information( self, decided_atoms, conflict_link_cases, non_conflict_link_cases, non_duplicate_link_cases, ): """Evaluate interaction information value for each available conflict link cases, and returns one link set has maximum information value. Args: decided_atoms: The source atoms to make new atom. conflict_link_cases: Conflicted link tuples list. non_conflict_link_cases: Non-conflict links list. non_duplicate_link_cases: Non-duplicated links list. :param decided_atoms: list[Atom] :param conflict_link_cases: list[list[EqualLinkKey]] :param non_conflict_link_cases: list[EqualLinkKey] :param non_duplicate_link_cases: list[EqualLinkKey] Returns: A link set has maximum interaction information value. :rtype: list[EqualLinkKey] """ result_list = list() inheritance_nodes = list() for decided_atom in decided_atoms: inheritance_nodes += find_inheritance_nodes(self.a, decided_atom) # TODO: To prevent freeze during interaction information generation, # user can limit the max value of calculation. max_repeat_length = self.evaluate_n_gram_limit \ if self.evaluate_n_gram_limit < self.provider.n_gram \ else self.provider.n_gram log.debug( "ConnectConflictInteractionInformation: " + "Calculating interaction information " + "(Total: " + str(len(conflict_link_cases)) + ")" ) current_ratio = 0 for i, conflict_link in enumerate(conflict_link_cases): current_ratio = self.__print_progress( "ConnectConflictInteractionInformation:II:", current_ratio, i, len(conflict_link_cases), 25 ) merged_links = list() merged_links.extend(non_conflict_link_cases) merged_links.extend(non_duplicate_link_cases) merged_links.extend(conflict_link) # Calculate n-gram data in each available link cases. # The provider with high n-gram will provides more correct data, # but speed will going slower rapidly. filtered_merged_links = \ map(lambda x: eq_link.key_to_link(self.a, x, self.ret[0], x.tv), filter(lambda x: # TODO: Currently connector excludes # an InheritanceLink to get valuable(funny) result. self.a[x.h].out[0] not in inheritance_nodes and # Manually throw away the links have low strength. (x.tv.mean > self.inter_info_strength_above_limit), merged_links ) ) if len(filtered_merged_links) is 0: continue interaction_information = PyInteractionInformationAtom(). 
\ calculate_interaction_information( filtered_merged_links, self.provider, max_repeat_length ) result_list.append({ "merged_links": merged_links, "filtered_merged_links": filtered_merged_links, "interaction_information": interaction_information }) if len(result_list) < 1: self.last_status = blending_status.EMPTY_RESULT return [] result_list = sorted( result_list, key=(lambda x: x["interaction_information"]), reverse=True ) self.__make_result_log(result_list, reverse=False) # self.__make_result_log(result_list, reverse=True) return result_list[0]['merged_links'] def __connect_links(self, best_interaction_information_link): """Connect selected links to new blend atom. Args: best_interaction_information_link: Selected link set. :param best_interaction_information_link: list[EqualLinkKey] """ for link in best_interaction_information_link: eq_link.key_to_link(self.a, link, self.ret[0], link.tv) def __connect_to_blended_atoms(self, decided_atoms): """Connect source atoms to new blend atom. Args: decided_atoms: The source atoms to make new atom. :param decided_atoms: list[Atom] """ # Make the links between source nodes and newly blended node. # TODO: Give proper truth value, not average of truthvalue. for merged_atom in self.ret: weighted_tv = get_weighted_tv(self.a.get_incoming(merged_atom.h)) for decided_atom in decided_atoms: self.a.add_link( types.AssociativeLink, [decided_atom, merged_atom], weighted_tv ) def __connect_best_interaction_information_conflict_links( self, decided_atoms, merged_atom ): """Actual algorithm for connecting links. Args: decided_atoms: The source atoms to make new atom. merged_atom: The atom to connect new links. :param decided_atoms: list[Atom] :param merged_atom: Atom """ # 1. Find the duplicate links, and the non-duplicate links. duplicate_links, non_duplicate_links = \ find_duplicate_links(self.a, decided_atoms) # 2. Find the conflict links, and non-conflict links from # duplicate links. conflict_links, non_conflict_links = \ find_conflict_links( self.a, duplicate_links, self.check_type, self.strength_diff_limit, self.confidence_above_limit ) # 3. Find the target links in nodes related with # decided nodes(blend source). # -> Get the related nodes by searching InheritanceLink. related_node_target_links = find_related_links( self.a, decided_atoms, self.inter_info_strength_above_limit ) # 4. Make 2^k available conflict link cases if there exists k conflicts. conflict_link_cases = make_conflict_link_cases(conflict_links) # 5. Calculate probabilities with target links. # -> Create the probabilities from example cases in whole AtomSpace. self.__prepare_blended_atoms(merged_atom) self.__make_statistics_data_provider( conflict_link_cases, non_conflict_links, non_duplicate_links, related_node_target_links ) self.__generate_information_probability( related_node_target_links ) # 6. Calculate interaction information for each available # conflict link cases. best_interaction_information_link = \ self.__evaluate_interaction_information( decided_atoms, conflict_link_cases, non_conflict_links, non_duplicate_links, ) # 7. Choose one case which has largest interaction information # and connect to new blend. self.__connect_links(best_interaction_information_link) self.__connect_to_blended_atoms(decided_atoms) def link_connect_impl(self, decided_atoms, merged_atom, config_base): """Implemented factory method to connecting links. Args: decided_atoms: The source atoms to make new atom. merged_atom: The atom to connect new links. config_base: A Node to save custom config. 
:param decided_atoms: list[Atom] :param merged_atom: Atom :param config_base: Atom """ check_type_str = BlendConfig().get_str( self.a, "connect-check-type", config_base ) strength_diff_threshold = BlendConfig().get_str( self.a, "connect-strength-diff-limit", config_base ) confidence_above_threshold = BlendConfig().get_str( self.a, "connect-confidence-above-limit", config_base ) data_n_gram_limit = BlendConfig().get_str( self.a, "connect-data-n-gram-limit", config_base ) evaluate_n_gram_limit = BlendConfig().get_str( self.a, "connect-evaluate-n-gram-limit", config_base ) inter_info_strength_above_limit = BlendConfig().get_str( self.a, "connect-inter-info-strength-above-limit", config_base ) # Check if given atom_type is valid or not. try: self.check_type = types.__dict__[check_type_str] except KeyError: self.check_type = types.Node # Check if given threshold value is valid or not. self.strength_diff_limit = float(strength_diff_threshold) self.confidence_above_limit = float(confidence_above_threshold) self.data_n_gram_limit = \ int(data_n_gram_limit) \ if data_n_gram_limit.isdigit()\ else -1 self.evaluate_n_gram_limit = \ int(evaluate_n_gram_limit) \ if evaluate_n_gram_limit.isdigit() \ else -1 self.inter_info_strength_above_limit = \ float(inter_info_strength_above_limit) self.__connect_best_interaction_information_conflict_links( decided_atoms, merged_atom ) def __print_progress( self, msg, current_ratio, current_count, total, step=10 ): # To print(debug) progress of evaluating. if current_ratio < current_count: current_ratio += total * step * 0.01 log.debug( msg + ": " + str(current_count) + "/" + str(total) + " (" + str(100 * current_count / float(total)) + "%)" ) return current_ratio def __make_result_log(self, result_list, reverse): # To print(debug) interaction information value. if reverse: result_list = reversed(result_list) for i, result in enumerate(result_list): name = "" # Prints only top 5 results. if i < 15: for link in result['filtered_merged_links']: for node in link.out: if node.t == types.ConceptNode and node != self.ret[0]: name += node.name + ", " log.debug(name + ": " + str(result['interaction_information']))
agpl-3.0
-250,953,747,012,042,080
39.476861
80
0.586519
false
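A minimal sketch of the selection step described in the ConnectConflictInteractionInformation record above (steps 4, 6 and 7 of its docstring): with k conflicting link pairs there are 2^k candidate link sets, and the connector keeps the set whose interaction information is largest. The score callback below is a hypothetical stand-in for OpenCog's PyInteractionInformationAtom, not the real API; only the enumeration and argmax logic is illustrated.

import itertools

def choose_best_case(conflict_pairs, fixed_links, score):
    # conflict_pairs: k pairs of mutually exclusive links (assumed binary conflicts).
    # fixed_links: non-conflict plus non-duplicate links, always kept.
    # score: hypothetical callback returning an interaction-information value.
    best_links, best_score = None, float("-inf")
    # Enumerate all 2^k ways of picking one link from each conflicting pair.
    for picks in itertools.product([0, 1], repeat=len(conflict_pairs)):
        candidate = list(fixed_links)
        candidate.extend(pair[pick] for pair, pick in zip(conflict_pairs, picks))
        value = score(candidate)
        if value > best_score:
            best_links, best_score = candidate, value
    return best_links

Exhaustive enumeration is only practical for small k, which is why the connector above also exposes connect-data-n-gram-limit and connect-evaluate-n-gram-limit to cap the cost of the probability and evaluation passes.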
magnastrazh/NEUCOGAR
RecNM/image_processing.py
3
2041
import root
import os
import numpy as np
from pybrain.datasets import SupervisedDataSet

__author__ = 'kamil'

import cv2


def get_number_of_files(path):
    # Count the files in a directory, skipping OS X ".DS_Store" entries.
    count = 0
    for file_name in os.listdir(path):
        extension = file_name.split(".")[-1]
        if extension == "DS_Store":
            continue
        count += 1
    return count


def get_cat_dog_trainset():
    # Build a pybrain dataset of flattened grayscale images with a
    # two-element one-hot target: [1, 0] = cat, [0, 1] = dog.
    count = 0
    images = os.listdir(root.path() + '/res/cats_proc/')
    shape = cv2.imread(root.path() + '/res/cats_proc/' + images[0], 0).shape
    ds = SupervisedDataSet(shape[0] * shape[1], 2)
    for image in os.listdir(root.path() + '/res/cats_proc/'):
        img = cv2.imread(root.path() + '/res/cats_proc/' + image, 0)
        inp = np.reshape(img, shape[0] * shape[1])
        target = [1, 0]
        ds.addSample(inp, target)
        count += 1
    for image in os.listdir(root.path() + '/res/dogs_proc/'):
        img = cv2.imread(root.path() + '/res/dogs_proc/' + image, 0)
        # Resize dog images to the cat reference shape so the flattened
        # vector matches the dataset's input dimension; cv2.resize takes
        # (width, height), hence the reversed tuple.
        img = cv2.resize(img, (shape[1], shape[0]))
        inp = np.reshape(img, shape[0] * shape[1])
        target = [0, 1]
        ds.addSample(inp, target)
        count += 1
    return ds


def get_cat_dog_testset():
    # Note: currently identical to get_cat_dog_trainset() and reads the same
    # directories, so train and test data are not actually disjoint.
    count = 0
    images = os.listdir(root.path() + '/res/cats_proc/')
    shape = cv2.imread(root.path() + '/res/cats_proc/' + images[0], 0).shape
    ds = SupervisedDataSet(shape[0] * shape[1], 2)
    for image in os.listdir(root.path() + '/res/cats_proc/'):
        img = cv2.imread(root.path() + '/res/cats_proc/' + image, 0)
        inp = np.reshape(img, shape[0] * shape[1])
        target = [1, 0]
        ds.addSample(inp, target)
        count += 1
    for image in os.listdir(root.path() + '/res/dogs_proc/'):
        img = cv2.imread(root.path() + '/res/dogs_proc/' + image, 0)
        img = cv2.resize(img, (shape[1], shape[0]))
        inp = np.reshape(img, shape[0] * shape[1])
        target = [0, 1]
        ds.addSample(inp, target)
        count += 1
    return ds

# img = cv2.resize(img,(280, 280), interpolation = cv2.INTER_CUBIC)
# cv2.imwrite(root.path()+"/images/proc.jpg", img)
gpl-2.0
5,613,655,091,397,096,000
34.189655
73
0.576188
false
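The image_processing.py record above packs flattened grayscale cat/dog images into a pybrain SupervisedDataSet with a one-hot two-class target. A hypothetical usage sketch for training a classifier on that dataset; the hidden-layer size, epoch count, and import path are assumptions, not part of the original repository.

from pybrain.tools.shortcuts import buildNetwork
from pybrain.supervised.trainers import BackpropTrainer

from image_processing import get_cat_dog_trainset  # assumes the module is importable

ds = get_cat_dog_trainset()
# One input unit per pixel, an assumed 32-unit hidden layer, two outputs (cat/dog).
net = buildNetwork(ds.indim, 32, ds.outdim)
trainer = BackpropTrainer(net, ds)
for epoch in range(10):  # assumed number of epochs
    err = trainer.train()  # one pass over the dataset, returns the mean error
    print(epoch, err)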
allevato/swift
utils/gyb_syntax_support/Classification.py
13
2779
from .Node import error
from .kinds import lowercase_first_word  # noqa: I201


class SyntaxClassification(object):
    '''
    Represents a classification a token can receive for syntax highlighting.
    '''

    def __init__(self, name, description):
        self.name = name
        self.swift_name = lowercase_first_word(name)
        self.description = description


SYNTAX_CLASSIFICATIONS = [
    SyntaxClassification('None', description='''
    The token should not receive syntax coloring.
    '''),
    SyntaxClassification('Keyword', description='''
    A Swift keyword, including contextual keywords.
    '''),
    SyntaxClassification('Identifier', description='''
    A generic identifier.
    '''),
    SyntaxClassification('TypeIdentifier', description='''
    An identifier referring to a type.
    '''),
    SyntaxClassification('DollarIdentifier', description='''
    An identifier starting with `$` like `$0`.
    '''),
    SyntaxClassification('IntegerLiteral', description='''
    An integer literal.
    '''),
    SyntaxClassification('FloatingLiteral', description='''
    A floating point literal.
    '''),
    SyntaxClassification('StringLiteral', description='''
    A string literal including multiline string literals.
    '''),
    SyntaxClassification('StringInterpolationAnchor', description='''
    The opening and closing parentheses of string interpolation.
    '''),
    SyntaxClassification('PoundDirectiveKeyword', description='''
    A `#` keyword like `#warning`.
    '''),
    SyntaxClassification('BuildConfigId', description='''
    A build configuration directive like `#if`, `#elseif`, `#else`.
    '''),
    SyntaxClassification('Attribute', description='''
    An attribute starting with an `@`.
    '''),
    SyntaxClassification('ObjectLiteral', description='''
    An image, color, etc. literal.
    '''),
    SyntaxClassification('EditorPlaceholder', description='''
    An editor placeholder of the form `<#content#>`
    '''),
    SyntaxClassification('LineComment', description='''
    A line comment starting with `//`.
    '''),
    SyntaxClassification('DocLineComment', description='''
    A doc line comment starting with `///`.
    '''),
    SyntaxClassification('BlockComment', description='''
    A block comment starting with `/**` and ending with `*/`.
    '''),
    SyntaxClassification('DocBlockComment', description='''
    A doc block comment starting with `/**` and ending with `*/`.
    '''),
]


def classification_by_name(classification_name):
    if classification_name is None:
        return None

    for classification in SYNTAX_CLASSIFICATIONS:
        if classification.name == classification_name:
            return classification

    error("Unknown syntax classification '%s'" % classification_name)
apache-2.0
-833,870,276,065,859,100
33.7375
76
0.670385
false
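The Classification.py record above derives each classification's swift_name by lower-casing the first word of its name, and classification_by_name does a linear lookup over the table. A small illustrative lookup; the import path assumes utils/ is on sys.path, and the expected swift_name value follows from what lowercase_first_word's name suggests rather than from this record itself.

from gyb_syntax_support.Classification import classification_by_name

c = classification_by_name('DocLineComment')
print(c.name)        # 'DocLineComment'
print(c.swift_name)  # expected: 'docLineComment' (assumption about lowercase_first_word)
print(c.description.strip())

# A None name is treated as "no classification"; an unknown name is reported
# through the support module's error() helper.
assert classification_by_name(None) is None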
KiChjang/servo
tests/wpt/web-platform-tests/service-workers/service-worker/resources/fetch-access-control.py
5
5045
import base64
import json
import os

import six

from wptserve.utils import isomorphic_decode, isomorphic_encode


def decodebytes(s):
    if six.PY3:
        return base64.decodebytes(six.ensure_binary(s))
    return base64.decodestring(s)


def main(request, response):
    headers = []

    headers.append((b'X-ServiceWorker-ServerHeader', b'SetInTheServer'))
    if b"ACAOrigin" in request.GET:
        for item in request.GET[b"ACAOrigin"].split(b","):
            headers.append((b"Access-Control-Allow-Origin", item))

    for suffix in [b"Headers", b"Methods", b"Credentials"]:
        query = b"ACA%s" % suffix
        header = b"Access-Control-Allow-%s" % suffix
        if query in request.GET:
            headers.append((header, request.GET[query]))

    if b"ACEHeaders" in request.GET:
        headers.append((b"Access-Control-Expose-Headers", request.GET[b"ACEHeaders"]))

    if (b"Auth" in request.GET and not request.auth.username) or b"AuthFail" in request.GET:
        status = 401
        headers.append((b'WWW-Authenticate', b'Basic realm="Restricted"'))
        body = b'Authentication canceled'
        return status, headers, body

    if b"PNGIMAGE" in request.GET:
        headers.append((b"Content-Type", b"image/png"))
        body = decodebytes(b"iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAAXNSR0IArs4c6QAAAARnQU1B"
                           b"AACxjwv8YQUAAAAJcEhZcwAADsQAAA7EAZUrDhsAAAAhSURBVDhPY3wro/KfgQLABKXJBqMG"
                           b"jBoAAqMGDLwBDAwAEsoCTFWunmQAAAAASUVORK5CYII=")
        return headers, body

    if b"VIDEO" in request.GET:
        headers.append((b"Content-Type", b"video/webm"))
        body = open(os.path.join(request.doc_root, u"media", u"movie_5.ogv"), "rb").read()
        length = len(body)
        # If "PartialContent" is specified, the requestor wants to test range
        # requests. For the initial request, respond with "206 Partial Content"
        # and don't send the entire content. Then expect subsequent requests to
        # have a "Range" header with a byte range. Respond with that range.
        if b"PartialContent" in request.GET:
            if length < 1:
                return 500, headers, b"file is too small for range requests"
            start = 0
            end = length - 1
            if b"Range" in request.headers:
                range_header = request.headers[b"Range"]
                prefix = b"bytes="
                split_header = range_header[len(prefix):].split(b"-")
                # The first request might be "bytes=0-". We want to force a range
                # request, so just return the first byte.
                if split_header[0] == b"0" and split_header[1] == b"":
                    end = start
                # Otherwise, it is a range request. Respect the values sent.
                if split_header[0] != b"":
                    start = int(split_header[0])
                if split_header[1] != b"":
                    end = int(split_header[1])
            else:
                # The request doesn't have a range. Force a range request by
                # returning the first byte.
                end = start
            headers.append((b"Accept-Ranges", b"bytes"))
            headers.append((b"Content-Length", isomorphic_encode(str(end - start + 1))))
            headers.append((b"Content-Range", b"bytes %d-%d/%d" % (start, end, length)))
            chunk = body[start:(end + 1)]
            return 206, headers, chunk
        return headers, body

    username = request.auth.username if request.auth.username else b"undefined"
    password = request.auth.password if request.auth.username else b"undefined"
    cookie = request.cookies[b'cookie'].value if b'cookie' in request.cookies else b"undefined"

    files = []
    for key, values in request.POST.items():
        assert len(values) == 1
        value = values[0]
        if not hasattr(value, u"file"):
            continue
        data = value.file.read()
        files.append({u"key": isomorphic_decode(key),
                      u"name": value.file.name,
                      u"type": value.type,
                      u"error": 0,  # TODO
                      u"size": len(data),
                      u"content": data})

    get_data = {isomorphic_decode(key): isomorphic_decode(request.GET[key])
                for key, value in request.GET.items()}
    post_data = {isomorphic_decode(key): isomorphic_decode(request.POST[key])
                 for key, value in request.POST.items()
                 if not hasattr(request.POST[key], u"file")}
    headers_data = {isomorphic_decode(key): isomorphic_decode(request.headers[key])
                    for key, value in request.headers.items()}

    data = {u"jsonpResult": u"success",
            u"method": request.method,
            u"headers": headers_data,
            u"body": isomorphic_decode(request.body),
            u"files": files,
            u"GET": get_data,
            u"POST": post_data,
            u"username": isomorphic_decode(username),
            u"password": isomorphic_decode(password),
            u"cookie": isomorphic_decode(cookie)}

    return headers, u"report( %s )" % json.dumps(data)
mpl-2.0
7,991,890,450,060,178,000
42.491379
125
0.605154
false
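The PartialContent branch in the fetch-access-control.py record above forces range requests by answering a request without an explicit range (or a bare "bytes=0-") with a single byte, and otherwise honouring the requested "bytes=start-end" span. A standalone sketch of that selection logic, using plain strings instead of the byte strings wptserve works with:

def select_range(range_header, length):
    # Mirrors the test server's behaviour: return the inclusive (start, end)
    # byte span to serve for a resource of the given length.
    start, end = 0, length - 1
    if range_header is None:
        # No Range header: force a ranged follow-up by serving one byte.
        return 0, 0
    first, last = range_header[len("bytes="):].split("-")
    if first == "0" and last == "":
        # Initial "bytes=0-" probe: also serve just the first byte.
        return 0, 0
    if first != "":
        start = int(first)
    if last != "":
        end = int(last)
    return start, end

assert select_range(None, 100) == (0, 0)
assert select_range("bytes=0-", 100) == (0, 0)
assert select_range("bytes=10-19", 100) == (10, 19)
assert select_range("bytes=90-", 100) == (90, 99)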