repo_name | path | copies | size | content | license
---|---|---|---|---|---|
dylanjbarth/luigi | test/contrib/spark_test.py | 4 | 11041 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from helpers import unittest
import os
import luigi
import luigi.contrib.hdfs
from luigi import six
from luigi.mock import MockTarget
from helpers import with_config
from luigi.contrib.spark import SparkJobError, SparkSubmitTask, PySparkTask, PySpark1xJob, Spark1xJob, SparkJob
from mock import patch, MagicMock
BytesIO = six.BytesIO
def poll_generator():
yield None
yield 1
def setup_run_process(proc):
poll_gen = poll_generator()
proc.return_value.poll = lambda: next(poll_gen)
proc.return_value.returncode = 0
proc.return_value.stdout = BytesIO()
proc.return_value.stderr = BytesIO()
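# A minimal sketch of how the stubbed process above behaves (illustrative
# only -- `fake_proc` is a hypothetical stand-in for the patched Popen):
#
#   fake_proc = MagicMock()
#   setup_run_process(fake_proc)
#   proc = fake_proc.return_value
#   proc.poll()        # -> None  (first poll: process still "running")
#   proc.poll()        # -> 1     (second poll: process has exited)
#   proc.returncode    # -> 0     (exit code reported as success)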
class TestSparkSubmitTask(SparkSubmitTask):
deploy_mode = "client"
name = "AppName"
entry_class = "org.test.MyClass"
jars = ["jars/my.jar"]
py_files = ["file1.py", "file2.py"]
files = ["file1", "file2"]
conf = {"Prop": "Value"}
properties_file = "conf/spark-defaults.conf"
driver_memory = "4G"
driver_java_options = "-Xopt"
driver_library_path = "library/path"
driver_class_path = "class/path"
executor_memory = "8G"
driver_cores = 8
supervise = True
total_executor_cores = 150
executor_cores = 10
queue = "queue"
num_executors = 2
archives = ["archive1", "archive2"]
app = "file"
def app_options(self):
return ["arg1", "arg2"]
def output(self):
return luigi.LocalTarget('output')
class TestDefaultSparkSubmitTask(SparkSubmitTask):
app = 'test.py'
def output(self):
return luigi.LocalTarget('output')
class TestPySparkTask(PySparkTask):
def input(self):
return MockTarget('input')
def output(self):
return MockTarget('output')
def main(self, sc, *args):
sc.textFile(self.input().path).saveAsTextFile(self.output().path)
class HdfsJob(luigi.ExternalTask):
def output(self):
return luigi.contrib.hdfs.HdfsTarget('test')
class TestSparkJob(SparkJob):
spark_workers = '2'
spark_master_memory = '1g'
spark_worker_memory = '1g'
def requires_hadoop(self):
return HdfsJob()
def jar(self):
return 'jar'
def job_class(self):
return 'job_class'
def output(self):
return luigi.LocalTarget('output')
class TestSpark1xJob(Spark1xJob):
def jar(self):
return 'jar'
def job_class(self):
return 'job_class'
def output(self):
return luigi.LocalTarget('output')
class TestPySpark1xJob(PySpark1xJob):
def program(self):
return 'python_file'
def output(self):
return luigi.LocalTarget('output')
class SparkSubmitTaskTest(unittest.TestCase):
ss = 'ss-stub'
@with_config({'spark': {'spark-submit': ss, 'master': "yarn-client", 'hadoop-conf-dir': 'path'}})
@patch('luigi.contrib.spark.subprocess.Popen')
def test_run(self, proc):
setup_run_process(proc)
job = TestSparkSubmitTask()
job.run()
self.assertEqual(proc.call_args[0][0],
['ss-stub', '--master', 'yarn-client', '--deploy-mode', 'client', '--name', 'AppName',
'--class', 'org.test.MyClass', '--jars', 'jars/my.jar', '--py-files', 'file1.py,file2.py',
'--files', 'file1,file2', '--archives', 'archive1,archive2', '--conf', 'Prop=Value',
'--properties-file', 'conf/spark-defaults.conf', '--driver-memory', '4G', '--driver-java-options', '-Xopt',
'--driver-library-path', 'library/path', '--driver-class-path', 'class/path', '--executor-memory', '8G',
'--driver-cores', '8', '--supervise', '--total-executor-cores', '150', '--executor-cores', '10',
'--queue', 'queue', '--num-executors', '2', 'file', 'arg1', 'arg2'])
@with_config({'spark': {'spark-submit': ss, 'master': 'spark://host:7077', 'conf': 'prop1=val1', 'jars': 'jar1.jar,jar2.jar',
'files': 'file1,file2', 'py-files': 'file1.py,file2.py', 'archives': 'archive1'}})
@patch('luigi.contrib.spark.subprocess.Popen')
def test_defaults(self, proc):
proc.return_value.returncode = 0
job = TestDefaultSparkSubmitTask()
job.run()
self.assertEqual(proc.call_args[0][0],
['ss-stub', '--master', 'spark://host:7077', '--jars', 'jar1.jar,jar2.jar',
'--py-files', 'file1.py,file2.py', '--files', 'file1,file2', '--archives', 'archive1',
'--conf', 'prop1=val1', 'test.py'])
@patch('luigi.contrib.spark.tempfile.TemporaryFile')
@patch('luigi.contrib.spark.subprocess.Popen')
def test_handle_failed_job(self, proc, file):
proc.return_value.returncode = 1
file.return_value = BytesIO(b'stderr')
try:
job = TestSparkSubmitTask()
job.run()
except SparkJobError as e:
self.assertEqual(e.err, 'stderr')
self.assertTrue('STDERR: stderr' in six.text_type(e))
else:
self.fail("Should have thrown SparkJobError")
@patch('luigi.contrib.spark.subprocess.Popen')
def test_app_must_be_set(self, proc):
with self.assertRaises(NotImplementedError):
job = SparkSubmitTask()
job.run()
@patch('luigi.contrib.spark.subprocess.Popen')
def test_app_interruption(self, proc):
def interrupt():
raise KeyboardInterrupt()
proc.return_value.wait = interrupt
try:
job = TestSparkSubmitTask()
job.run()
except KeyboardInterrupt:
pass
proc.return_value.kill.assert_called()
class PySparkTaskTest(unittest.TestCase):
ss = 'ss-stub'
@with_config({'spark': {'spark-submit': ss, 'master': "spark://host:7077"}})
@patch('luigi.contrib.spark.subprocess.Popen')
def test_run(self, proc):
setup_run_process(proc)
job = TestPySparkTask()
job.run()
proc_arg_list = proc.call_args[0][0]
self.assertEqual(proc_arg_list[0:7], ['ss-stub', '--master', 'spark://host:7077', '--deploy-mode', 'client', '--name', 'TestPySparkTask'])
self.assertTrue(os.path.exists(proc_arg_list[7]))
self.assertTrue(proc_arg_list[8].endswith('TestPySparkTask.pickle'))
@with_config({'spark': {'py-packages': 'dummy_test_module'}})
@patch.dict('sys.modules', {'pyspark': MagicMock()})
@patch('pyspark.SparkContext')
def test_pyspark_runner(self, spark_context):
sc = spark_context.return_value.__enter__.return_value
def mock_spark_submit(task):
from luigi.contrib.pyspark_runner import PySparkRunner
PySparkRunner(*task.app_command()[1:]).run()
# Check py-package exists
self.assertTrue(os.path.exists(sc.addPyFile.call_args[0][0]))
with patch.object(SparkSubmitTask, 'run', mock_spark_submit):
job = TestPySparkTask()
job.run()
sc.textFile.assert_called_with('input')
sc.textFile.return_value.saveAsTextFile.assert_called_with('output')
class SparkJobTest(unittest.TestCase):
hcd = 'hcd-stub'
ycd = 'ycd-stub'
sj = 'sj-stub'
    sc = 'sc-stub'
@with_config({'spark': {'hadoop-conf-dir': hcd, 'yarn-conf-dir': ycd, 'spark-jar': sj, 'spark-class': sc}})
@patch('luigi.contrib.spark.subprocess.Popen')
@patch('luigi.contrib.hdfs.HdfsTarget')
def test_run(self, target, proc):
setup_run_process(proc)
job = TestSparkJob()
job.run()
self.assertEqual(proc.call_args[0][0], [self.sc, 'org.apache.spark.deploy.yarn.Client', '--jar', job.jar(), '--class', job.job_class(),
'--num-workers', '2', '--master-memory', '1g', '--worker-memory', '1g'])
@with_config({'spark': {'hadoop-conf-dir': hcd, 'yarn-conf-dir': ycd, 'spark-jar': sj, 'spark-class': sc}})
@patch('luigi.contrib.spark.tempfile.TemporaryFile')
@patch('luigi.contrib.spark.subprocess.Popen')
def test_handle_failed_job(self, proc, file):
proc.return_value.returncode = 1
file.return_value = BytesIO(b'stderr')
try:
job = TestSparkJob()
job.run()
except SparkJobError as e:
self.assertEqual(e.err, 'stderr')
self.assertTrue('STDERR: stderr' in six.text_type(e))
else:
self.fail("Should have thrown SparkJobError")
class Spark1xTest(unittest.TestCase):
ss = 'ss-stub'
@with_config({'spark': {'spark-submit': ss}})
@patch('luigi.contrib.spark.subprocess.Popen')
def test_run(self, proc):
setup_run_process(proc)
job = TestSpark1xJob()
job.run()
self.assertEqual(proc.call_args[0][0], [self.ss, '--master', 'yarn-client', '--class', job.job_class(), job.jar()])
@with_config({'spark': {'spark-submit': ss}})
@patch('luigi.contrib.spark.tempfile.TemporaryFile')
@patch('luigi.contrib.spark.subprocess.Popen')
def test_handle_failed_job(self, proc, file):
proc.return_value.returncode = 1
file.return_value = BytesIO(b'stderr')
try:
job = TestSpark1xJob()
job.run()
except SparkJobError as e:
self.assertEqual(e.err, 'stderr')
self.assertTrue('STDERR: stderr' in six.text_type(e))
else:
self.fail("Should have thrown SparkJobError")
class PySpark1xTest(unittest.TestCase):
ss = 'ss-stub'
@with_config({'spark': {'spark-submit': ss}})
@patch('luigi.contrib.spark.subprocess.Popen')
def test_run(self, proc):
setup_run_process(proc)
job = TestPySpark1xJob()
job.run()
self.assertEqual(proc.call_args[0][0], [self.ss, '--master', 'yarn-client', job.program()])
@with_config({'spark': {'spark-submit': ss}})
@patch('luigi.contrib.spark.tempfile.TemporaryFile')
@patch('luigi.contrib.spark.subprocess.Popen')
def test_handle_failed_job(self, proc, file):
proc.return_value.returncode = 1
file.return_value = BytesIO(b'stderr')
try:
job = TestPySpark1xJob()
job.run()
except SparkJobError as e:
self.assertEqual(e.err, 'stderr')
self.assertTrue('STDERR: stderr' in six.text_type(e))
else:
self.fail("Should have thrown SparkJobError")
| apache-2.0 |
code4futuredotorg/reeborg_tw | src/libraries/Brython3.2.3/Lib/encodings/cp865.py | 272 | 34618 | """ Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP865.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_map)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp865',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
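# Illustrative round trip, assuming this module is registered under the
# codec name 'cp865' as in the CPython standard library:
#
#   b'\x9b'.decode('cp865')    # -> 'ø' (LATIN SMALL LETTER O WITH STROKE)
#   'ø'.encode('cp865')        # -> b'\x9b'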
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA
0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
0x0083: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x0084: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS
0x0085: 0x00e0, # LATIN SMALL LETTER A WITH GRAVE
0x0086: 0x00e5, # LATIN SMALL LETTER A WITH RING ABOVE
0x0087: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA
0x0088: 0x00ea, # LATIN SMALL LETTER E WITH CIRCUMFLEX
0x0089: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS
0x008a: 0x00e8, # LATIN SMALL LETTER E WITH GRAVE
0x008b: 0x00ef, # LATIN SMALL LETTER I WITH DIAERESIS
0x008c: 0x00ee, # LATIN SMALL LETTER I WITH CIRCUMFLEX
0x008d: 0x00ec, # LATIN SMALL LETTER I WITH GRAVE
0x008e: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x008f: 0x00c5, # LATIN CAPITAL LETTER A WITH RING ABOVE
0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
0x0091: 0x00e6, # LATIN SMALL LIGATURE AE
0x0092: 0x00c6, # LATIN CAPITAL LIGATURE AE
0x0093: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x0094: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS
0x0095: 0x00f2, # LATIN SMALL LETTER O WITH GRAVE
0x0096: 0x00fb, # LATIN SMALL LETTER U WITH CIRCUMFLEX
0x0097: 0x00f9, # LATIN SMALL LETTER U WITH GRAVE
0x0098: 0x00ff, # LATIN SMALL LETTER Y WITH DIAERESIS
0x0099: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x009b: 0x00f8, # LATIN SMALL LETTER O WITH STROKE
0x009c: 0x00a3, # POUND SIGN
0x009d: 0x00d8, # LATIN CAPITAL LETTER O WITH STROKE
0x009e: 0x20a7, # PESETA SIGN
0x009f: 0x0192, # LATIN SMALL LETTER F WITH HOOK
0x00a0: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE
0x00a1: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE
0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
0x00a4: 0x00f1, # LATIN SMALL LETTER N WITH TILDE
0x00a5: 0x00d1, # LATIN CAPITAL LETTER N WITH TILDE
0x00a6: 0x00aa, # FEMININE ORDINAL INDICATOR
0x00a7: 0x00ba, # MASCULINE ORDINAL INDICATOR
0x00a8: 0x00bf, # INVERTED QUESTION MARK
0x00a9: 0x2310, # REVERSED NOT SIGN
0x00aa: 0x00ac, # NOT SIGN
0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF
0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER
0x00ad: 0x00a1, # INVERTED EXCLAMATION MARK
0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00af: 0x00a4, # CURRENCY SIGN
0x00b0: 0x2591, # LIGHT SHADE
0x00b1: 0x2592, # MEDIUM SHADE
0x00b2: 0x2593, # DARK SHADE
0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x00b5: 0x2561, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
0x00b6: 0x2562, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
0x00b7: 0x2556, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
0x00b8: 0x2555, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
0x00bd: 0x255c, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
0x00be: 0x255b, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x00c6: 0x255e, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
0x00c7: 0x255f, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x00cf: 0x2567, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
0x00d0: 0x2568, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
0x00d1: 0x2564, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
0x00d2: 0x2565, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
0x00d3: 0x2559, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
0x00d4: 0x2558, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
0x00d5: 0x2552, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
0x00d6: 0x2553, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
0x00d7: 0x256b, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
0x00d8: 0x256a, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x00db: 0x2588, # FULL BLOCK
0x00dc: 0x2584, # LOWER HALF BLOCK
0x00dd: 0x258c, # LEFT HALF BLOCK
0x00de: 0x2590, # RIGHT HALF BLOCK
0x00df: 0x2580, # UPPER HALF BLOCK
0x00e0: 0x03b1, # GREEK SMALL LETTER ALPHA
0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S
0x00e2: 0x0393, # GREEK CAPITAL LETTER GAMMA
0x00e3: 0x03c0, # GREEK SMALL LETTER PI
0x00e4: 0x03a3, # GREEK CAPITAL LETTER SIGMA
0x00e5: 0x03c3, # GREEK SMALL LETTER SIGMA
0x00e6: 0x00b5, # MICRO SIGN
0x00e7: 0x03c4, # GREEK SMALL LETTER TAU
0x00e8: 0x03a6, # GREEK CAPITAL LETTER PHI
0x00e9: 0x0398, # GREEK CAPITAL LETTER THETA
0x00ea: 0x03a9, # GREEK CAPITAL LETTER OMEGA
0x00eb: 0x03b4, # GREEK SMALL LETTER DELTA
0x00ec: 0x221e, # INFINITY
0x00ed: 0x03c6, # GREEK SMALL LETTER PHI
0x00ee: 0x03b5, # GREEK SMALL LETTER EPSILON
0x00ef: 0x2229, # INTERSECTION
0x00f0: 0x2261, # IDENTICAL TO
0x00f1: 0x00b1, # PLUS-MINUS SIGN
0x00f2: 0x2265, # GREATER-THAN OR EQUAL TO
0x00f3: 0x2264, # LESS-THAN OR EQUAL TO
0x00f4: 0x2320, # TOP HALF INTEGRAL
0x00f5: 0x2321, # BOTTOM HALF INTEGRAL
0x00f6: 0x00f7, # DIVISION SIGN
0x00f7: 0x2248, # ALMOST EQUAL TO
0x00f8: 0x00b0, # DEGREE SIGN
0x00f9: 0x2219, # BULLET OPERATOR
0x00fa: 0x00b7, # MIDDLE DOT
0x00fb: 0x221a, # SQUARE ROOT
0x00fc: 0x207f, # SUPERSCRIPT LATIN SMALL LETTER N
0x00fd: 0x00b2, # SUPERSCRIPT TWO
0x00fe: 0x25a0, # BLACK SQUARE
0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Decoding Table
decoding_table = (
'\x00' # 0x0000 -> NULL
'\x01' # 0x0001 -> START OF HEADING
'\x02' # 0x0002 -> START OF TEXT
'\x03' # 0x0003 -> END OF TEXT
'\x04' # 0x0004 -> END OF TRANSMISSION
'\x05' # 0x0005 -> ENQUIRY
'\x06' # 0x0006 -> ACKNOWLEDGE
'\x07' # 0x0007 -> BELL
'\x08' # 0x0008 -> BACKSPACE
'\t' # 0x0009 -> HORIZONTAL TABULATION
'\n' # 0x000a -> LINE FEED
'\x0b' # 0x000b -> VERTICAL TABULATION
'\x0c' # 0x000c -> FORM FEED
'\r' # 0x000d -> CARRIAGE RETURN
'\x0e' # 0x000e -> SHIFT OUT
'\x0f' # 0x000f -> SHIFT IN
'\x10' # 0x0010 -> DATA LINK ESCAPE
'\x11' # 0x0011 -> DEVICE CONTROL ONE
'\x12' # 0x0012 -> DEVICE CONTROL TWO
'\x13' # 0x0013 -> DEVICE CONTROL THREE
'\x14' # 0x0014 -> DEVICE CONTROL FOUR
'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x0016 -> SYNCHRONOUS IDLE
'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
'\x18' # 0x0018 -> CANCEL
'\x19' # 0x0019 -> END OF MEDIUM
'\x1a' # 0x001a -> SUBSTITUTE
'\x1b' # 0x001b -> ESCAPE
'\x1c' # 0x001c -> FILE SEPARATOR
'\x1d' # 0x001d -> GROUP SEPARATOR
'\x1e' # 0x001e -> RECORD SEPARATOR
'\x1f' # 0x001f -> UNIT SEPARATOR
' ' # 0x0020 -> SPACE
'!' # 0x0021 -> EXCLAMATION MARK
'"' # 0x0022 -> QUOTATION MARK
'#' # 0x0023 -> NUMBER SIGN
'$' # 0x0024 -> DOLLAR SIGN
'%' # 0x0025 -> PERCENT SIGN
'&' # 0x0026 -> AMPERSAND
"'" # 0x0027 -> APOSTROPHE
'(' # 0x0028 -> LEFT PARENTHESIS
')' # 0x0029 -> RIGHT PARENTHESIS
'*' # 0x002a -> ASTERISK
'+' # 0x002b -> PLUS SIGN
',' # 0x002c -> COMMA
'-' # 0x002d -> HYPHEN-MINUS
'.' # 0x002e -> FULL STOP
'/' # 0x002f -> SOLIDUS
'0' # 0x0030 -> DIGIT ZERO
'1' # 0x0031 -> DIGIT ONE
'2' # 0x0032 -> DIGIT TWO
'3' # 0x0033 -> DIGIT THREE
'4' # 0x0034 -> DIGIT FOUR
'5' # 0x0035 -> DIGIT FIVE
'6' # 0x0036 -> DIGIT SIX
'7' # 0x0037 -> DIGIT SEVEN
'8' # 0x0038 -> DIGIT EIGHT
'9' # 0x0039 -> DIGIT NINE
':' # 0x003a -> COLON
';' # 0x003b -> SEMICOLON
'<' # 0x003c -> LESS-THAN SIGN
'=' # 0x003d -> EQUALS SIGN
'>' # 0x003e -> GREATER-THAN SIGN
'?' # 0x003f -> QUESTION MARK
'@' # 0x0040 -> COMMERCIAL AT
'A' # 0x0041 -> LATIN CAPITAL LETTER A
'B' # 0x0042 -> LATIN CAPITAL LETTER B
'C' # 0x0043 -> LATIN CAPITAL LETTER C
'D' # 0x0044 -> LATIN CAPITAL LETTER D
'E' # 0x0045 -> LATIN CAPITAL LETTER E
'F' # 0x0046 -> LATIN CAPITAL LETTER F
'G' # 0x0047 -> LATIN CAPITAL LETTER G
'H' # 0x0048 -> LATIN CAPITAL LETTER H
'I' # 0x0049 -> LATIN CAPITAL LETTER I
'J' # 0x004a -> LATIN CAPITAL LETTER J
'K' # 0x004b -> LATIN CAPITAL LETTER K
'L' # 0x004c -> LATIN CAPITAL LETTER L
'M' # 0x004d -> LATIN CAPITAL LETTER M
'N' # 0x004e -> LATIN CAPITAL LETTER N
'O' # 0x004f -> LATIN CAPITAL LETTER O
'P' # 0x0050 -> LATIN CAPITAL LETTER P
'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
'R' # 0x0052 -> LATIN CAPITAL LETTER R
'S' # 0x0053 -> LATIN CAPITAL LETTER S
'T' # 0x0054 -> LATIN CAPITAL LETTER T
'U' # 0x0055 -> LATIN CAPITAL LETTER U
'V' # 0x0056 -> LATIN CAPITAL LETTER V
'W' # 0x0057 -> LATIN CAPITAL LETTER W
'X' # 0x0058 -> LATIN CAPITAL LETTER X
'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
'Z' # 0x005a -> LATIN CAPITAL LETTER Z
'[' # 0x005b -> LEFT SQUARE BRACKET
'\\' # 0x005c -> REVERSE SOLIDUS
']' # 0x005d -> RIGHT SQUARE BRACKET
'^' # 0x005e -> CIRCUMFLEX ACCENT
'_' # 0x005f -> LOW LINE
'`' # 0x0060 -> GRAVE ACCENT
'a' # 0x0061 -> LATIN SMALL LETTER A
'b' # 0x0062 -> LATIN SMALL LETTER B
'c' # 0x0063 -> LATIN SMALL LETTER C
'd' # 0x0064 -> LATIN SMALL LETTER D
'e' # 0x0065 -> LATIN SMALL LETTER E
'f' # 0x0066 -> LATIN SMALL LETTER F
'g' # 0x0067 -> LATIN SMALL LETTER G
'h' # 0x0068 -> LATIN SMALL LETTER H
'i' # 0x0069 -> LATIN SMALL LETTER I
'j' # 0x006a -> LATIN SMALL LETTER J
'k' # 0x006b -> LATIN SMALL LETTER K
'l' # 0x006c -> LATIN SMALL LETTER L
'm' # 0x006d -> LATIN SMALL LETTER M
'n' # 0x006e -> LATIN SMALL LETTER N
'o' # 0x006f -> LATIN SMALL LETTER O
'p' # 0x0070 -> LATIN SMALL LETTER P
'q' # 0x0071 -> LATIN SMALL LETTER Q
'r' # 0x0072 -> LATIN SMALL LETTER R
's' # 0x0073 -> LATIN SMALL LETTER S
't' # 0x0074 -> LATIN SMALL LETTER T
'u' # 0x0075 -> LATIN SMALL LETTER U
'v' # 0x0076 -> LATIN SMALL LETTER V
'w' # 0x0077 -> LATIN SMALL LETTER W
'x' # 0x0078 -> LATIN SMALL LETTER X
'y' # 0x0079 -> LATIN SMALL LETTER Y
'z' # 0x007a -> LATIN SMALL LETTER Z
'{' # 0x007b -> LEFT CURLY BRACKET
'|' # 0x007c -> VERTICAL LINE
'}' # 0x007d -> RIGHT CURLY BRACKET
'~' # 0x007e -> TILDE
'\x7f' # 0x007f -> DELETE
'\xc7' # 0x0080 -> LATIN CAPITAL LETTER C WITH CEDILLA
'\xfc' # 0x0081 -> LATIN SMALL LETTER U WITH DIAERESIS
'\xe9' # 0x0082 -> LATIN SMALL LETTER E WITH ACUTE
'\xe2' # 0x0083 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
'\xe4' # 0x0084 -> LATIN SMALL LETTER A WITH DIAERESIS
'\xe0' # 0x0085 -> LATIN SMALL LETTER A WITH GRAVE
'\xe5' # 0x0086 -> LATIN SMALL LETTER A WITH RING ABOVE
'\xe7' # 0x0087 -> LATIN SMALL LETTER C WITH CEDILLA
'\xea' # 0x0088 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
'\xeb' # 0x0089 -> LATIN SMALL LETTER E WITH DIAERESIS
'\xe8' # 0x008a -> LATIN SMALL LETTER E WITH GRAVE
'\xef' # 0x008b -> LATIN SMALL LETTER I WITH DIAERESIS
'\xee' # 0x008c -> LATIN SMALL LETTER I WITH CIRCUMFLEX
'\xec' # 0x008d -> LATIN SMALL LETTER I WITH GRAVE
'\xc4' # 0x008e -> LATIN CAPITAL LETTER A WITH DIAERESIS
'\xc5' # 0x008f -> LATIN CAPITAL LETTER A WITH RING ABOVE
'\xc9' # 0x0090 -> LATIN CAPITAL LETTER E WITH ACUTE
'\xe6' # 0x0091 -> LATIN SMALL LIGATURE AE
'\xc6' # 0x0092 -> LATIN CAPITAL LIGATURE AE
'\xf4' # 0x0093 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
'\xf6' # 0x0094 -> LATIN SMALL LETTER O WITH DIAERESIS
'\xf2' # 0x0095 -> LATIN SMALL LETTER O WITH GRAVE
'\xfb' # 0x0096 -> LATIN SMALL LETTER U WITH CIRCUMFLEX
'\xf9' # 0x0097 -> LATIN SMALL LETTER U WITH GRAVE
'\xff' # 0x0098 -> LATIN SMALL LETTER Y WITH DIAERESIS
'\xd6' # 0x0099 -> LATIN CAPITAL LETTER O WITH DIAERESIS
'\xdc' # 0x009a -> LATIN CAPITAL LETTER U WITH DIAERESIS
'\xf8' # 0x009b -> LATIN SMALL LETTER O WITH STROKE
'\xa3' # 0x009c -> POUND SIGN
'\xd8' # 0x009d -> LATIN CAPITAL LETTER O WITH STROKE
'\u20a7' # 0x009e -> PESETA SIGN
'\u0192' # 0x009f -> LATIN SMALL LETTER F WITH HOOK
'\xe1' # 0x00a0 -> LATIN SMALL LETTER A WITH ACUTE
'\xed' # 0x00a1 -> LATIN SMALL LETTER I WITH ACUTE
'\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE
'\xfa' # 0x00a3 -> LATIN SMALL LETTER U WITH ACUTE
'\xf1' # 0x00a4 -> LATIN SMALL LETTER N WITH TILDE
'\xd1' # 0x00a5 -> LATIN CAPITAL LETTER N WITH TILDE
'\xaa' # 0x00a6 -> FEMININE ORDINAL INDICATOR
'\xba' # 0x00a7 -> MASCULINE ORDINAL INDICATOR
'\xbf' # 0x00a8 -> INVERTED QUESTION MARK
'\u2310' # 0x00a9 -> REVERSED NOT SIGN
'\xac' # 0x00aa -> NOT SIGN
'\xbd' # 0x00ab -> VULGAR FRACTION ONE HALF
'\xbc' # 0x00ac -> VULGAR FRACTION ONE QUARTER
'\xa1' # 0x00ad -> INVERTED EXCLAMATION MARK
'\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xa4' # 0x00af -> CURRENCY SIGN
'\u2591' # 0x00b0 -> LIGHT SHADE
'\u2592' # 0x00b1 -> MEDIUM SHADE
'\u2593' # 0x00b2 -> DARK SHADE
'\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
'\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
'\u2561' # 0x00b5 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
'\u2562' # 0x00b6 -> BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
'\u2556' # 0x00b7 -> BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
'\u2555' # 0x00b8 -> BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
'\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
'\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
'\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
'\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
'\u255c' # 0x00bd -> BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
'\u255b' # 0x00be -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
'\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
'\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
'\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
'\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
'\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
'\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
'\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
'\u255e' # 0x00c6 -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
'\u255f' # 0x00c7 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
'\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
'\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
'\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
'\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
'\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
'\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
'\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
'\u2567' # 0x00cf -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
'\u2568' # 0x00d0 -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
'\u2564' # 0x00d1 -> BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
'\u2565' # 0x00d2 -> BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
'\u2559' # 0x00d3 -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
'\u2558' # 0x00d4 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
'\u2552' # 0x00d5 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
'\u2553' # 0x00d6 -> BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
'\u256b' # 0x00d7 -> BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
'\u256a' # 0x00d8 -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
'\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
'\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
'\u2588' # 0x00db -> FULL BLOCK
'\u2584' # 0x00dc -> LOWER HALF BLOCK
'\u258c' # 0x00dd -> LEFT HALF BLOCK
'\u2590' # 0x00de -> RIGHT HALF BLOCK
'\u2580' # 0x00df -> UPPER HALF BLOCK
'\u03b1' # 0x00e0 -> GREEK SMALL LETTER ALPHA
'\xdf' # 0x00e1 -> LATIN SMALL LETTER SHARP S
'\u0393' # 0x00e2 -> GREEK CAPITAL LETTER GAMMA
'\u03c0' # 0x00e3 -> GREEK SMALL LETTER PI
'\u03a3' # 0x00e4 -> GREEK CAPITAL LETTER SIGMA
'\u03c3' # 0x00e5 -> GREEK SMALL LETTER SIGMA
'\xb5' # 0x00e6 -> MICRO SIGN
'\u03c4' # 0x00e7 -> GREEK SMALL LETTER TAU
'\u03a6' # 0x00e8 -> GREEK CAPITAL LETTER PHI
'\u0398' # 0x00e9 -> GREEK CAPITAL LETTER THETA
'\u03a9' # 0x00ea -> GREEK CAPITAL LETTER OMEGA
'\u03b4' # 0x00eb -> GREEK SMALL LETTER DELTA
'\u221e' # 0x00ec -> INFINITY
'\u03c6' # 0x00ed -> GREEK SMALL LETTER PHI
'\u03b5' # 0x00ee -> GREEK SMALL LETTER EPSILON
'\u2229' # 0x00ef -> INTERSECTION
'\u2261' # 0x00f0 -> IDENTICAL TO
'\xb1' # 0x00f1 -> PLUS-MINUS SIGN
'\u2265' # 0x00f2 -> GREATER-THAN OR EQUAL TO
'\u2264' # 0x00f3 -> LESS-THAN OR EQUAL TO
'\u2320' # 0x00f4 -> TOP HALF INTEGRAL
'\u2321' # 0x00f5 -> BOTTOM HALF INTEGRAL
'\xf7' # 0x00f6 -> DIVISION SIGN
'\u2248' # 0x00f7 -> ALMOST EQUAL TO
'\xb0' # 0x00f8 -> DEGREE SIGN
'\u2219' # 0x00f9 -> BULLET OPERATOR
'\xb7' # 0x00fa -> MIDDLE DOT
'\u221a' # 0x00fb -> SQUARE ROOT
'\u207f' # 0x00fc -> SUPERSCRIPT LATIN SMALL LETTER N
'\xb2' # 0x00fd -> SUPERSCRIPT TWO
'\u25a0' # 0x00fe -> BLACK SQUARE
'\xa0' # 0x00ff -> NO-BREAK SPACE
)
### Encoding Map
encoding_map = {
0x0000: 0x0000, # NULL
0x0001: 0x0001, # START OF HEADING
0x0002: 0x0002, # START OF TEXT
0x0003: 0x0003, # END OF TEXT
0x0004: 0x0004, # END OF TRANSMISSION
0x0005: 0x0005, # ENQUIRY
0x0006: 0x0006, # ACKNOWLEDGE
0x0007: 0x0007, # BELL
0x0008: 0x0008, # BACKSPACE
0x0009: 0x0009, # HORIZONTAL TABULATION
0x000a: 0x000a, # LINE FEED
0x000b: 0x000b, # VERTICAL TABULATION
0x000c: 0x000c, # FORM FEED
0x000d: 0x000d, # CARRIAGE RETURN
0x000e: 0x000e, # SHIFT OUT
0x000f: 0x000f, # SHIFT IN
0x0010: 0x0010, # DATA LINK ESCAPE
0x0011: 0x0011, # DEVICE CONTROL ONE
0x0012: 0x0012, # DEVICE CONTROL TWO
0x0013: 0x0013, # DEVICE CONTROL THREE
0x0014: 0x0014, # DEVICE CONTROL FOUR
0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
0x0016: 0x0016, # SYNCHRONOUS IDLE
0x0017: 0x0017, # END OF TRANSMISSION BLOCK
0x0018: 0x0018, # CANCEL
0x0019: 0x0019, # END OF MEDIUM
0x001a: 0x001a, # SUBSTITUTE
0x001b: 0x001b, # ESCAPE
0x001c: 0x001c, # FILE SEPARATOR
0x001d: 0x001d, # GROUP SEPARATOR
0x001e: 0x001e, # RECORD SEPARATOR
0x001f: 0x001f, # UNIT SEPARATOR
0x0020: 0x0020, # SPACE
0x0021: 0x0021, # EXCLAMATION MARK
0x0022: 0x0022, # QUOTATION MARK
0x0023: 0x0023, # NUMBER SIGN
0x0024: 0x0024, # DOLLAR SIGN
0x0025: 0x0025, # PERCENT SIGN
0x0026: 0x0026, # AMPERSAND
0x0027: 0x0027, # APOSTROPHE
0x0028: 0x0028, # LEFT PARENTHESIS
0x0029: 0x0029, # RIGHT PARENTHESIS
0x002a: 0x002a, # ASTERISK
0x002b: 0x002b, # PLUS SIGN
0x002c: 0x002c, # COMMA
0x002d: 0x002d, # HYPHEN-MINUS
0x002e: 0x002e, # FULL STOP
0x002f: 0x002f, # SOLIDUS
0x0030: 0x0030, # DIGIT ZERO
0x0031: 0x0031, # DIGIT ONE
0x0032: 0x0032, # DIGIT TWO
0x0033: 0x0033, # DIGIT THREE
0x0034: 0x0034, # DIGIT FOUR
0x0035: 0x0035, # DIGIT FIVE
0x0036: 0x0036, # DIGIT SIX
0x0037: 0x0037, # DIGIT SEVEN
0x0038: 0x0038, # DIGIT EIGHT
0x0039: 0x0039, # DIGIT NINE
0x003a: 0x003a, # COLON
0x003b: 0x003b, # SEMICOLON
0x003c: 0x003c, # LESS-THAN SIGN
0x003d: 0x003d, # EQUALS SIGN
0x003e: 0x003e, # GREATER-THAN SIGN
0x003f: 0x003f, # QUESTION MARK
0x0040: 0x0040, # COMMERCIAL AT
0x0041: 0x0041, # LATIN CAPITAL LETTER A
0x0042: 0x0042, # LATIN CAPITAL LETTER B
0x0043: 0x0043, # LATIN CAPITAL LETTER C
0x0044: 0x0044, # LATIN CAPITAL LETTER D
0x0045: 0x0045, # LATIN CAPITAL LETTER E
0x0046: 0x0046, # LATIN CAPITAL LETTER F
0x0047: 0x0047, # LATIN CAPITAL LETTER G
0x0048: 0x0048, # LATIN CAPITAL LETTER H
0x0049: 0x0049, # LATIN CAPITAL LETTER I
0x004a: 0x004a, # LATIN CAPITAL LETTER J
0x004b: 0x004b, # LATIN CAPITAL LETTER K
0x004c: 0x004c, # LATIN CAPITAL LETTER L
0x004d: 0x004d, # LATIN CAPITAL LETTER M
0x004e: 0x004e, # LATIN CAPITAL LETTER N
0x004f: 0x004f, # LATIN CAPITAL LETTER O
0x0050: 0x0050, # LATIN CAPITAL LETTER P
0x0051: 0x0051, # LATIN CAPITAL LETTER Q
0x0052: 0x0052, # LATIN CAPITAL LETTER R
0x0053: 0x0053, # LATIN CAPITAL LETTER S
0x0054: 0x0054, # LATIN CAPITAL LETTER T
0x0055: 0x0055, # LATIN CAPITAL LETTER U
0x0056: 0x0056, # LATIN CAPITAL LETTER V
0x0057: 0x0057, # LATIN CAPITAL LETTER W
0x0058: 0x0058, # LATIN CAPITAL LETTER X
0x0059: 0x0059, # LATIN CAPITAL LETTER Y
0x005a: 0x005a, # LATIN CAPITAL LETTER Z
0x005b: 0x005b, # LEFT SQUARE BRACKET
0x005c: 0x005c, # REVERSE SOLIDUS
0x005d: 0x005d, # RIGHT SQUARE BRACKET
0x005e: 0x005e, # CIRCUMFLEX ACCENT
0x005f: 0x005f, # LOW LINE
0x0060: 0x0060, # GRAVE ACCENT
0x0061: 0x0061, # LATIN SMALL LETTER A
0x0062: 0x0062, # LATIN SMALL LETTER B
0x0063: 0x0063, # LATIN SMALL LETTER C
0x0064: 0x0064, # LATIN SMALL LETTER D
0x0065: 0x0065, # LATIN SMALL LETTER E
0x0066: 0x0066, # LATIN SMALL LETTER F
0x0067: 0x0067, # LATIN SMALL LETTER G
0x0068: 0x0068, # LATIN SMALL LETTER H
0x0069: 0x0069, # LATIN SMALL LETTER I
0x006a: 0x006a, # LATIN SMALL LETTER J
0x006b: 0x006b, # LATIN SMALL LETTER K
0x006c: 0x006c, # LATIN SMALL LETTER L
0x006d: 0x006d, # LATIN SMALL LETTER M
0x006e: 0x006e, # LATIN SMALL LETTER N
0x006f: 0x006f, # LATIN SMALL LETTER O
0x0070: 0x0070, # LATIN SMALL LETTER P
0x0071: 0x0071, # LATIN SMALL LETTER Q
0x0072: 0x0072, # LATIN SMALL LETTER R
0x0073: 0x0073, # LATIN SMALL LETTER S
0x0074: 0x0074, # LATIN SMALL LETTER T
0x0075: 0x0075, # LATIN SMALL LETTER U
0x0076: 0x0076, # LATIN SMALL LETTER V
0x0077: 0x0077, # LATIN SMALL LETTER W
0x0078: 0x0078, # LATIN SMALL LETTER X
0x0079: 0x0079, # LATIN SMALL LETTER Y
0x007a: 0x007a, # LATIN SMALL LETTER Z
0x007b: 0x007b, # LEFT CURLY BRACKET
0x007c: 0x007c, # VERTICAL LINE
0x007d: 0x007d, # RIGHT CURLY BRACKET
0x007e: 0x007e, # TILDE
0x007f: 0x007f, # DELETE
0x00a0: 0x00ff, # NO-BREAK SPACE
0x00a1: 0x00ad, # INVERTED EXCLAMATION MARK
0x00a3: 0x009c, # POUND SIGN
0x00a4: 0x00af, # CURRENCY SIGN
0x00aa: 0x00a6, # FEMININE ORDINAL INDICATOR
0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00ac: 0x00aa, # NOT SIGN
0x00b0: 0x00f8, # DEGREE SIGN
0x00b1: 0x00f1, # PLUS-MINUS SIGN
0x00b2: 0x00fd, # SUPERSCRIPT TWO
0x00b5: 0x00e6, # MICRO SIGN
0x00b7: 0x00fa, # MIDDLE DOT
0x00ba: 0x00a7, # MASCULINE ORDINAL INDICATOR
0x00bc: 0x00ac, # VULGAR FRACTION ONE QUARTER
0x00bd: 0x00ab, # VULGAR FRACTION ONE HALF
0x00bf: 0x00a8, # INVERTED QUESTION MARK
0x00c4: 0x008e, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x00c5: 0x008f, # LATIN CAPITAL LETTER A WITH RING ABOVE
0x00c6: 0x0092, # LATIN CAPITAL LIGATURE AE
0x00c7: 0x0080, # LATIN CAPITAL LETTER C WITH CEDILLA
0x00c9: 0x0090, # LATIN CAPITAL LETTER E WITH ACUTE
0x00d1: 0x00a5, # LATIN CAPITAL LETTER N WITH TILDE
0x00d6: 0x0099, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x00d8: 0x009d, # LATIN CAPITAL LETTER O WITH STROKE
0x00dc: 0x009a, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x00df: 0x00e1, # LATIN SMALL LETTER SHARP S
0x00e0: 0x0085, # LATIN SMALL LETTER A WITH GRAVE
0x00e1: 0x00a0, # LATIN SMALL LETTER A WITH ACUTE
0x00e2: 0x0083, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x00e4: 0x0084, # LATIN SMALL LETTER A WITH DIAERESIS
0x00e5: 0x0086, # LATIN SMALL LETTER A WITH RING ABOVE
0x00e6: 0x0091, # LATIN SMALL LIGATURE AE
0x00e7: 0x0087, # LATIN SMALL LETTER C WITH CEDILLA
0x00e8: 0x008a, # LATIN SMALL LETTER E WITH GRAVE
0x00e9: 0x0082, # LATIN SMALL LETTER E WITH ACUTE
0x00ea: 0x0088, # LATIN SMALL LETTER E WITH CIRCUMFLEX
0x00eb: 0x0089, # LATIN SMALL LETTER E WITH DIAERESIS
0x00ec: 0x008d, # LATIN SMALL LETTER I WITH GRAVE
0x00ed: 0x00a1, # LATIN SMALL LETTER I WITH ACUTE
0x00ee: 0x008c, # LATIN SMALL LETTER I WITH CIRCUMFLEX
0x00ef: 0x008b, # LATIN SMALL LETTER I WITH DIAERESIS
0x00f1: 0x00a4, # LATIN SMALL LETTER N WITH TILDE
0x00f2: 0x0095, # LATIN SMALL LETTER O WITH GRAVE
0x00f3: 0x00a2, # LATIN SMALL LETTER O WITH ACUTE
0x00f4: 0x0093, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x00f6: 0x0094, # LATIN SMALL LETTER O WITH DIAERESIS
0x00f7: 0x00f6, # DIVISION SIGN
0x00f8: 0x009b, # LATIN SMALL LETTER O WITH STROKE
0x00f9: 0x0097, # LATIN SMALL LETTER U WITH GRAVE
0x00fa: 0x00a3, # LATIN SMALL LETTER U WITH ACUTE
0x00fb: 0x0096, # LATIN SMALL LETTER U WITH CIRCUMFLEX
0x00fc: 0x0081, # LATIN SMALL LETTER U WITH DIAERESIS
0x00ff: 0x0098, # LATIN SMALL LETTER Y WITH DIAERESIS
0x0192: 0x009f, # LATIN SMALL LETTER F WITH HOOK
0x0393: 0x00e2, # GREEK CAPITAL LETTER GAMMA
0x0398: 0x00e9, # GREEK CAPITAL LETTER THETA
0x03a3: 0x00e4, # GREEK CAPITAL LETTER SIGMA
0x03a6: 0x00e8, # GREEK CAPITAL LETTER PHI
0x03a9: 0x00ea, # GREEK CAPITAL LETTER OMEGA
0x03b1: 0x00e0, # GREEK SMALL LETTER ALPHA
0x03b4: 0x00eb, # GREEK SMALL LETTER DELTA
0x03b5: 0x00ee, # GREEK SMALL LETTER EPSILON
0x03c0: 0x00e3, # GREEK SMALL LETTER PI
0x03c3: 0x00e5, # GREEK SMALL LETTER SIGMA
0x03c4: 0x00e7, # GREEK SMALL LETTER TAU
0x03c6: 0x00ed, # GREEK SMALL LETTER PHI
0x207f: 0x00fc, # SUPERSCRIPT LATIN SMALL LETTER N
0x20a7: 0x009e, # PESETA SIGN
0x2219: 0x00f9, # BULLET OPERATOR
0x221a: 0x00fb, # SQUARE ROOT
0x221e: 0x00ec, # INFINITY
0x2229: 0x00ef, # INTERSECTION
0x2248: 0x00f7, # ALMOST EQUAL TO
0x2261: 0x00f0, # IDENTICAL TO
0x2264: 0x00f3, # LESS-THAN OR EQUAL TO
0x2265: 0x00f2, # GREATER-THAN OR EQUAL TO
0x2310: 0x00a9, # REVERSED NOT SIGN
0x2320: 0x00f4, # TOP HALF INTEGRAL
0x2321: 0x00f5, # BOTTOM HALF INTEGRAL
0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
0x2552: 0x00d5, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
0x2553: 0x00d6, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x2555: 0x00b8, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
0x2556: 0x00b7, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x2558: 0x00d4, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
0x2559: 0x00d3, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x255b: 0x00be, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
0x255c: 0x00bd, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
0x255e: 0x00c6, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
0x255f: 0x00c7, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x2561: 0x00b5, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
0x2562: 0x00b6, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x2564: 0x00d1, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
0x2565: 0x00d2, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x2567: 0x00cf, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
0x2568: 0x00d0, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x256a: 0x00d8, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
0x256b: 0x00d7, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x2580: 0x00df, # UPPER HALF BLOCK
0x2584: 0x00dc, # LOWER HALF BLOCK
0x2588: 0x00db, # FULL BLOCK
0x258c: 0x00dd, # LEFT HALF BLOCK
0x2590: 0x00de, # RIGHT HALF BLOCK
0x2591: 0x00b0, # LIGHT SHADE
0x2592: 0x00b1, # MEDIUM SHADE
0x2593: 0x00b2, # DARK SHADE
0x25a0: 0x00fe, # BLACK SQUARE
}
| agpl-3.0 |
etkirsch/scikit-learn | examples/semi_supervised/plot_label_propagation_digits.py | 268 | 2723 | """
===================================================
Label Propagation digits: Demonstrating performance
===================================================
This example demonstrates the power of semi-supervised learning by
training a Label Spreading model to classify handwritten digits
with sets of very few labels.
The handwritten digit dataset has 1797 total points. The model will
be trained using all points, but only 30 will be labeled. Results
in the form of a confusion matrix and a series of metrics over each
class will be very good.
At the end, the top 10 most uncertain predictions will be shown.
"""
print(__doc__)
# Authors: Clay Woolam <[email protected]>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn import datasets
from sklearn.semi_supervised import label_propagation
from sklearn.metrics import confusion_matrix, classification_report
digits = datasets.load_digits()
rng = np.random.RandomState(0)
indices = np.arange(len(digits.data))
rng.shuffle(indices)
X = digits.data[indices[:330]]
y = digits.target[indices[:330]]
images = digits.images[indices[:330]]
n_total_samples = len(y)
n_labeled_points = 30
indices = np.arange(n_total_samples)
unlabeled_set = indices[n_labeled_points:]
# mask the labels of every point outside the labeled set (-1 == unlabeled)
y_train = np.copy(y)
y_train[unlabeled_set] = -1
###############################################################################
# Learn with LabelSpreading
lp_model = label_propagation.LabelSpreading(gamma=0.25, max_iter=5)
lp_model.fit(X, y_train)
predicted_labels = lp_model.transduction_[unlabeled_set]
true_labels = y[unlabeled_set]
cm = confusion_matrix(true_labels, predicted_labels, labels=lp_model.classes_)
print("Label Spreading model: %d labeled & %d unlabeled points (%d total)" %
(n_labeled_points, n_total_samples - n_labeled_points, n_total_samples))
print(classification_report(true_labels, predicted_labels))
print("Confusion matrix")
print(cm)
# calculate uncertainty values for each transduced distribution
pred_entropies = stats.distributions.entropy(lp_model.label_distributions_.T)
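# scipy's entropy() works column-wise, so transposing puts each sample's
# label distribution in its own column; higher entropy means the model is
# less certain, so the tail of the argsort below holds the most uncertain.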
# pick the top 10 most uncertain labels
uncertainty_index = np.argsort(pred_entropies)[-10:]
###############################################################################
# plot
f = plt.figure(figsize=(7, 5))
for index, image_index in enumerate(uncertainty_index):
image = images[image_index]
sub = f.add_subplot(2, 5, index + 1)
sub.imshow(image, cmap=plt.cm.gray_r)
plt.xticks([])
plt.yticks([])
sub.set_title('predict: %i\ntrue: %i' % (
lp_model.transduction_[image_index], y[image_index]))
f.suptitle('Learning with small amount of labeled data')
plt.show()
| bsd-3-clause |
Bachmann1234/diff-cover | diff_cover/report_generator.py | 1 | 11385 | """
Classes for generating diff coverage reports.
"""
from __future__ import unicode_literals
from abc import ABCMeta, abstractmethod
from jinja2 import Environment, PackageLoader
from jinja2_pluralize import pluralize_dj
from diff_cover.snippets import Snippet
import six
class DiffViolations(object):
"""
Class to capture violations generated by a particular diff
"""
def __init__(self, violations, measured_lines, diff_lines):
self.lines = {
violation.line for violation in violations
}.intersection(diff_lines)
self.violations = {
violation for violation in violations
if violation.line in self.lines
}
# By convention, a violation reporter
# can return `None` to indicate that all lines are "measured"
# by default. This is an optimization to avoid counting
# lines in all the source files.
if measured_lines is None:
self.measured_lines = set(diff_lines)
else:
self.measured_lines = set(measured_lines).intersection(diff_lines)
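# Illustrative example (hypothetical values, not taken from the library):
# with violations on lines {3, 7, 20}, measured_lines {1..10} and
# diff_lines {5, 6, 7}, a DiffViolations instance ends up with
#   lines          == {7}          # violations restricted to the diff
#   measured_lines == {5, 6, 7}    # measured lines restricted to the diff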
class BaseReportGenerator(object):
"""
Generate a diff coverage report.
"""
__metaclass__ = ABCMeta
def __init__(self, violations_reporter, diff_reporter):
"""
Configure the report generator to build a report
from `violations_reporter` (of type BaseViolationReporter)
and `diff_reporter` (of type BaseDiffReporter)
"""
self._violations = violations_reporter
self._diff = diff_reporter
self._diff_violations_dict = None
self._cache_violations = None
@abstractmethod
def generate_report(self, output_file):
"""
Write the report to `output_file`, which is a file-like
object implementing the `write()` method.
Concrete subclasses should access diff coverage info
using the base class methods.
"""
pass
def coverage_report_name(self):
"""
Return the name of the coverage report.
"""
return self._violations.name()
def diff_report_name(self):
"""
Return the name of the diff.
"""
return self._diff.name()
def src_paths(self):
"""
Return a list of source files in the diff
for which we have coverage information.
"""
return {src for src, summary in self._diff_violations().items()
if len(summary.measured_lines) > 0}
def percent_covered(self, src_path):
"""
Return a float percent of lines covered for the source
in `src_path`.
If we have no coverage information for `src_path`, returns None
"""
diff_violations = self._diff_violations().get(src_path)
if diff_violations is None:
return None
# Protect against a divide by zero
num_measured = len(diff_violations.measured_lines)
if num_measured > 0:
num_uncovered = len(diff_violations.lines)
return 100 - float(num_uncovered) / num_measured * 100
else:
return None
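    # Example: 8 measured lines with 2 uncovered
    # -> 100 - (2 / 8) * 100 == 75.0 percent covered.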
def violation_lines(self, src_path):
"""
Return a list of lines in violation (integers)
in `src_path` that were changed.
If we have no coverage information for
`src_path`, returns an empty list.
"""
diff_violations = self._diff_violations().get(src_path)
if diff_violations is None:
return []
return sorted(diff_violations.lines)
def total_num_lines(self):
"""
Return the total number of lines in the diff for
which we have coverage info.
"""
return sum([len(summary.measured_lines) for summary
in self._diff_violations().values()])
def total_num_violations(self):
"""
Returns the total number of lines in the diff
that are in violation.
"""
return sum(
len(summary.lines)
for summary
in self._diff_violations().values()
)
def total_percent_covered(self):
"""
Returns the float percent of lines in the diff that are covered.
(only counting lines for which we have coverage info).
"""
total_lines = self.total_num_lines()
if total_lines > 0:
num_covered = total_lines - self.total_num_violations()
return int(float(num_covered) / total_lines * 100)
else:
return 100
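    # Example: 50 measured lines with 7 violations
    # -> int(float(43) / 50 * 100) == 86 (int() truncates, not rounds).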
def _diff_violations(self):
"""
Returns a dictionary of the form:
{ SRC_PATH: DiffViolations(SRC_PATH) }
where `SRC_PATH` is the path to the source file.
To make this efficient, we cache and reuse the result.
"""
if not self._diff_violations_dict:
self._diff_violations_dict = {
src_path: DiffViolations(
self._violations.violations(src_path),
self._violations.measured_lines(src_path),
self._diff.lines_changed(src_path),
)
for src_path in self._diff.src_paths_changed()
}
return self._diff_violations_dict
# Set up the template environment
TEMPLATE_LOADER = PackageLoader(__package__)
TEMPLATE_ENV = Environment(loader=TEMPLATE_LOADER,
trim_blocks=True,
lstrip_blocks=True)
TEMPLATE_ENV.filters['iteritems'] = six.iteritems
TEMPLATE_ENV.filters['pluralize'] = pluralize_dj
class TemplateReportGenerator(BaseReportGenerator):
"""
Reporter that uses a template to generate the report.
"""
# Subclasses override this to specify the name of the templates
# If not overridden, the template reporter will raise an exception
TEMPLATE_NAME = None
CSS_TEMPLATE_NAME = None
# Subclasses should set this to True to indicate
# that they want to include source file snippets.
INCLUDE_SNIPPETS = False
def __init__(self, violations_reporter, diff_reporter, css_url=None):
super(TemplateReportGenerator, self).__init__(violations_reporter, diff_reporter)
self.css_url = css_url
def generate_report(self, output_file):
"""
See base class.
output_file must be a file handler that takes in bytes!
"""
if self.TEMPLATE_NAME is not None:
template = TEMPLATE_ENV.get_template(self.TEMPLATE_NAME)
report = template.render(self._context())
if isinstance(report, six.string_types):
report = report.encode('utf-8')
output_file.write(report)
def generate_css(self, output_file):
"""
Generate an external style sheet file.
output_file must be a file handler that takes in bytes!
"""
if self.CSS_TEMPLATE_NAME is not None:
template = TEMPLATE_ENV.get_template(self.CSS_TEMPLATE_NAME)
style = template.render(self._context())
if isinstance(style, six.string_types):
style = style.encode('utf-8')
output_file.write(style)
def _context(self):
"""
Return the context to pass to the template.
The context is a dict of the form:
{
'css_url': CSS_URL,
'report_name': REPORT_NAME,
'diff_name': DIFF_NAME,
'src_stats': {SRC_PATH: {
'percent_covered': PERCENT_COVERED,
'violation_lines': [LINE_NUM, ...]
}, ... }
'total_num_lines': TOTAL_NUM_LINES,
'total_num_violations': TOTAL_NUM_VIOLATIONS,
'total_percent_covered': TOTAL_PERCENT_COVERED
}
"""
# Calculate the information to pass to the template
src_stats = {
src: self._src_path_stats(src) for src in self.src_paths()
}
# Include snippet style info if we're displaying
# source code snippets
if self.INCLUDE_SNIPPETS:
snippet_style = Snippet.style_defs()
else:
snippet_style = None
return {
'css_url': self.css_url,
'report_name': self.coverage_report_name(),
'diff_name': self.diff_report_name(),
'src_stats': src_stats,
'total_num_lines': self.total_num_lines(),
'total_num_violations': self.total_num_violations(),
'total_percent_covered': self.total_percent_covered(),
'snippet_style': snippet_style
}
@staticmethod
def combine_adjacent_lines(line_numbers):
"""
Given a sorted collection of line numbers this will
turn them to strings and combine adjacent values
[1, 2, 5, 6, 100] -> ["1-2", "5-6", "100"]
"""
combine_template = "{0}-{1}"
combined_list = []
# Add a terminating value of `None` to list
line_numbers.append(None)
start = line_numbers[0]
end = None
for line_number in line_numbers[1:]:
# If the current number is adjacent to the previous number
if (end if end else start) + 1 == line_number:
end = line_number
else:
if end:
combined_list.append(combine_template.format(start, end))
else:
combined_list.append(str(start))
start = line_number
end = None
return combined_list
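    # Usage sketch (mirrors the docstring above); note that the method
    # mutates its argument by appending a terminating None:
    #   TemplateReportGenerator.combine_adjacent_lines([1, 2, 5, 6, 100])
    #   -> ['1-2', '5-6', '100']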
def _src_path_stats(self, src_path):
"""
Return a dict of statistics for the source file at `src_path`.
"""
# Find violation lines
violation_lines = self.violation_lines(src_path)
violations = sorted(self._diff_violations()[src_path].violations)
# Load source snippets (if the report will display them)
# If we cannot load the file, then fail gracefully
if self.INCLUDE_SNIPPETS:
try:
snippets = Snippet.load_snippets_html(src_path, violation_lines)
except IOError:
snippets = []
else:
snippets = []
return {
'percent_covered': self.percent_covered(src_path),
'violation_lines': TemplateReportGenerator.combine_adjacent_lines(violation_lines),
'violations': violations,
'snippets_html': snippets
}
class StringReportGenerator(TemplateReportGenerator):
"""
Generate a string diff coverage report.
"""
TEMPLATE_NAME = "console_coverage_report.txt"
class HtmlReportGenerator(TemplateReportGenerator):
"""
Generate an HTML formatted diff coverage report.
"""
TEMPLATE_NAME = "html_coverage_report.html"
CSS_TEMPLATE_NAME = "external_style.css"
INCLUDE_SNIPPETS = True
class StringQualityReportGenerator(TemplateReportGenerator):
"""
Generate a string diff quality report.
"""
TEMPLATE_NAME = "console_quality_report.txt"
class HtmlQualityReportGenerator(TemplateReportGenerator):
"""
Generate an HTML formatted diff quality report.
"""
TEMPLATE_NAME = "html_quality_report.html"
CSS_TEMPLATE_NAME = "external_style.css"
INCLUDE_SNIPPETS = True
| apache-2.0 |
FrankBian/kuma | kuma/demos/admin.py | 5 | 6125 | from django.contrib import admin
from django.contrib.admin import helpers
from django.contrib.admin.util import model_ngettext, get_deleted_objects
from django.db import router
from django.core.exceptions import PermissionDenied
from django.template.response import TemplateResponse
from django.utils.encoding import force_unicode
from django.utils.translation import ugettext_lazy, ugettext as _
from kuma.core.managers import NamespacedTaggableManager
from taggit.forms import TagWidget
from .models import Submission
def censor_selected(modeladmin, request, queryset):
"""
Censor the selected submissions, with confirmation interstitial.
Largely stolen from django.contrib.admin.actions.delete_selected
"""
opts = modeladmin.model._meta
app_label = opts.app_label
# Check that the user has delete permission for the actual model
if not modeladmin.has_delete_permission(request):
raise PermissionDenied
# The user has already confirmed the deletion.
    # Do the deletion and return None to display the change list view again.
if request.POST.get('post'):
censored_url = request.POST.get('censored_url', None)
n = queryset.count()
if n:
for obj in queryset:
obj.censor(url=censored_url)
obj_display = force_unicode(obj)
modeladmin.message_user(request, _("Censored %(item)s") % {
"item": obj_display
})
modeladmin.message_user(request,
_("Successfully censored %(count)d %(items)s.") % {
"count": n, "items": model_ngettext(modeladmin.opts, n)
})
# Return None to display the change list page again.
return None
if len(queryset) == 1:
objects_name = force_unicode(opts.verbose_name)
else:
objects_name = force_unicode(opts.verbose_name_plural)
context = {
"title": _("Are you sure?"),
"object_name": objects_name,
"queryset": queryset,
"opts": opts,
"app_label": app_label,
"action_checkbox_name": helpers.ACTION_CHECKBOX_NAME,
}
# Display the confirmation page
return TemplateResponse(request,
'admin/demos/submission/censor_selected_confirmation.html',
context, current_app=modeladmin.admin_site.name)
censor_selected.short_description = ugettext_lazy("Censor selected %(verbose_name_plural)s")
def delete_selected(modeladmin, request, queryset):
"""
The out-of-box Django delete never calls Submission.delete(), so this is a
mostly redundant lift-and-hack to ensure that happens. This is important
because Submission.delete() also cleans up its uploaded files.
See also: https://docs.djangoproject.com/en/dev/ref/contrib/admin/actions/
"""
opts = modeladmin.model._meta
app_label = opts.app_label
# Check that the user has delete permission for the actual model
if not modeladmin.has_delete_permission(request):
raise PermissionDenied
using = router.db_for_write(modeladmin.model)
# Populate deletable_objects, a data structure of all related objects that
# will also be deleted.
deletable_objects, perms_needed, protected = get_deleted_objects(
queryset, opts, request.user, modeladmin.admin_site, using)
# The user has already confirmed the deletion.
    # Do the deletion and return None to display the change list view again.
if request.POST.get('post'):
if perms_needed:
raise PermissionDenied
n = queryset.count()
if n:
for obj in queryset:
obj_display = force_unicode(obj)
modeladmin.log_deletion(request, obj, obj_display)
obj.delete()
modeladmin.message_user(request,
_("Deleted and uploaded files for %(item)s") % {
"item": obj_display
})
modeladmin.message_user(request,
_("Successfully deleted %(count)d %(items)s.") % {
"count": n, "items": model_ngettext(modeladmin.opts, n)
})
# Return None to display the change list page again.
return None
if len(queryset) == 1:
objects_name = force_unicode(opts.verbose_name)
else:
objects_name = force_unicode(opts.verbose_name_plural)
if perms_needed or protected:
title = _("Cannot delete %(name)s") % {"name": objects_name}
else:
title = _("Are you sure?")
context = {
"title": title,
"object_name": objects_name,
"deletable_objects": [deletable_objects],
"queryset": queryset,
"perms_lacking": perms_needed,
"protected": protected,
"opts": opts,
"app_label": app_label,
"action_checkbox_name": helpers.ACTION_CHECKBOX_NAME,
}
# Display the confirmation page
return TemplateResponse(request, modeladmin.delete_selected_confirmation_template or [
"admin/%s/%s/delete_selected_confirmation.html" % (app_label, opts.object_name.lower()),
"admin/%s/delete_selected_confirmation.html" % app_label,
"admin/delete_selected_confirmation.html"
], context, current_app=modeladmin.admin_site.name)
delete_selected.short_description = ugettext_lazy("Delete selected %(verbose_name_plural)s")
class SubmissionAdmin(admin.ModelAdmin):
actions = (delete_selected, censor_selected,)
list_display = ('title', 'creator', 'featured', 'censored', 'hidden',
'taggit_tags', 'modified', )
list_editable = ('featured', 'taggit_tags', )
search_fields = ('title', 'summary', 'description', 'taggit_tags__name')
list_filter = ('censored', 'hidden', 'created', 'modified')
readonly_fields = ('censored',)
formfield_overrides = {
NamespacedTaggableManager: {
"widget": TagWidget(attrs={"size": 70})
}
}
def queryset(self, request):
return Submission.admin_manager
admin.site.register(Submission, SubmissionAdmin)
| mpl-2.0 |
leiferikb/bitpop | build/third_party/twisted_10_2/twisted/news/test/test_nntp.py | 55 | 3417 | # Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
from twisted.trial import unittest
from twisted.news import database
from twisted.news import nntp
from twisted.protocols import loopback
ALL_GROUPS = ('alt.test.nntp', 0, 1, 'y'),
GROUP = ('0', '1', '0', 'alt.test.nntp', 'group', 'selected')
SUBSCRIPTIONS = ['alt.test.nntp', 'news.testgroup']
POST_STRING = """Path: not-for-mail
From: <[email protected]>
Subject: a test
Newsgroups: alt.test.nntp
Organization:
Summary:
Keywords:
User-Agent: tin/1.4.5-20010409 ("One More Nightmare") (UNIX) (Linux/2.4.17 (i686))
this is a test
.
..
...
lala
moo
--
"One World, one Web, one Program." - Microsoft(R) promotional ad
"Ein Volk, ein Reich, ein Fuhrer." - Adolf Hitler
--
10:56pm up 4 days, 4:42, 1 user, load average: 0.08, 0.08, 0.12
"""
class TestNNTPClient(nntp.NNTPClient):
def __init__(self):
nntp.NNTPClient.__init__(self)
def assertEquals(self, foo, bar):
if foo != bar: raise AssertionError("%r != %r!" % (foo, bar))
def connectionMade(self):
nntp.NNTPClient.connectionMade(self)
self.fetchSubscriptions()
def gotSubscriptions(self, subscriptions):
self.assertEquals(len(subscriptions), len(SUBSCRIPTIONS))
for s in subscriptions:
assert s in SUBSCRIPTIONS
self.fetchGroups()
def gotAllGroups(self, info):
self.assertEquals(len(info), len(ALL_GROUPS))
self.assertEquals(info[0], ALL_GROUPS[0])
self.fetchGroup('alt.test.nntp')
def getAllGroupsFailed(self, error):
raise AssertionError("fetchGroups() failed: %s" % (error,))
def gotGroup(self, info):
self.assertEquals(len(info), 6)
self.assertEquals(info, GROUP)
self.postArticle(POST_STRING)
def getSubscriptionsFailed(self, error):
raise AssertionError("fetchSubscriptions() failed: %s" % (error,))
def getGroupFailed(self, error):
raise AssertionError("fetchGroup() failed: %s" % (error,))
def postFailed(self, error):
raise AssertionError("postArticle() failed: %s" % (error,))
def postedOk(self):
self.fetchArticle(1)
def gotArticle(self, info):
origBody = POST_STRING.split('\n\n')[1]
newBody = info.split('\n\n', 1)[1]
self.assertEquals(origBody, newBody)
# We're done
self.transport.loseConnection()
def getArticleFailed(self, error):
raise AssertionError("fetchArticle() failed: %s" % (error,))
class NNTPTestCase(unittest.TestCase):
def setUp(self):
self.server = nntp.NNTPServer()
self.server.factory = self
self.backend = database.NewsShelf(None, 'news.db')
self.backend.addGroup('alt.test.nntp', 'y')
for s in SUBSCRIPTIONS:
self.backend.addSubscription(s)
self.client = TestNNTPClient()
def testLoopback(self):
return loopback.loopbackAsync(self.server, self.client)
# XXX This test is woefully incomplete. It tests the single
# most common code path and nothing else. Expand it and the
# test fairy will leave you a surprise.
# reactor.iterate(1) # fetchGroups()
# reactor.iterate(1) # fetchGroup()
# reactor.iterate(1) # postArticle()
| gpl-3.0 |
jjmleiro/hue | desktop/core/ext-py/pycparser-2.14/pycparser/c_lexer.py | 42 | 14447 | #------------------------------------------------------------------------------
# pycparser: c_lexer.py
#
# CLexer class: lexer for the C language
#
# Copyright (C) 2008-2015, Eli Bendersky
# License: BSD
#------------------------------------------------------------------------------
import re
import sys
from .ply import lex
from .ply.lex import TOKEN
class CLexer(object):
""" A lexer for the C language. After building it, set the
input text with input(), and call token() to get new
tokens.
The public attribute filename can be set to an initial
filename, but the lexer will update it upon #line
directives.
"""
def __init__(self, error_func, on_lbrace_func, on_rbrace_func,
type_lookup_func):
""" Create a new Lexer.
error_func:
An error function. Will be called with an error
message, line and column as arguments, in case of
an error during lexing.
on_lbrace_func, on_rbrace_func:
Called when an LBRACE or RBRACE is encountered
(likely to push/pop type_lookup_func's scope)
type_lookup_func:
A type lookup function. Given a string, it must
return True IFF this string is a name of a type
that was defined with a typedef earlier.
"""
self.error_func = error_func
self.on_lbrace_func = on_lbrace_func
self.on_rbrace_func = on_rbrace_func
self.type_lookup_func = type_lookup_func
self.filename = ''
# Keeps track of the last token returned from self.token()
self.last_token = None
# Allow either "# line" or "# <num>" to support GCC's
# cpp output
#
self.line_pattern = re.compile('([ \t]*line\W)|([ \t]*\d+)')
self.pragma_pattern = re.compile('[ \t]*pragma\W')
def build(self, **kwargs):
""" Builds the lexer from the specification. Must be
called after the lexer object is created.
This method exists separately, because the PLY
manual warns against calling lex.lex inside
__init__
"""
self.lexer = lex.lex(object=self, **kwargs)
def reset_lineno(self):
""" Resets the internal line number counter of the lexer.
"""
self.lexer.lineno = 1
def input(self, text):
self.lexer.input(text)
def token(self):
self.last_token = self.lexer.token()
return self.last_token
def find_tok_column(self, token):
""" Find the column of the token in its line.
"""
last_cr = self.lexer.lexdata.rfind('\n', 0, token.lexpos)
return token.lexpos - last_cr
######################-- PRIVATE --######################
##
## Internal auxiliary methods
##
def _error(self, msg, token):
location = self._make_tok_location(token)
self.error_func(msg, location[0], location[1])
self.lexer.skip(1)
def _make_tok_location(self, token):
return (token.lineno, self.find_tok_column(token))
##
## Reserved keywords
##
keywords = (
'_BOOL', '_COMPLEX', 'AUTO', 'BREAK', 'CASE', 'CHAR', 'CONST',
'CONTINUE', 'DEFAULT', 'DO', 'DOUBLE', 'ELSE', 'ENUM', 'EXTERN',
'FLOAT', 'FOR', 'GOTO', 'IF', 'INLINE', 'INT', 'LONG',
'REGISTER', 'OFFSETOF',
'RESTRICT', 'RETURN', 'SHORT', 'SIGNED', 'SIZEOF', 'STATIC', 'STRUCT',
'SWITCH', 'TYPEDEF', 'UNION', 'UNSIGNED', 'VOID',
'VOLATILE', 'WHILE',
)
keyword_map = {}
for keyword in keywords:
if keyword == '_BOOL':
keyword_map['_Bool'] = keyword
elif keyword == '_COMPLEX':
keyword_map['_Complex'] = keyword
else:
keyword_map[keyword.lower()] = keyword
##
## All the tokens recognized by the lexer
##
tokens = keywords + (
# Identifiers
'ID',
# Type identifiers (identifiers previously defined as
# types with typedef)
'TYPEID',
# constants
'INT_CONST_DEC', 'INT_CONST_OCT', 'INT_CONST_HEX', 'INT_CONST_BIN',
'FLOAT_CONST', 'HEX_FLOAT_CONST',
'CHAR_CONST',
'WCHAR_CONST',
# String literals
'STRING_LITERAL',
'WSTRING_LITERAL',
# Operators
'PLUS', 'MINUS', 'TIMES', 'DIVIDE', 'MOD',
'OR', 'AND', 'NOT', 'XOR', 'LSHIFT', 'RSHIFT',
'LOR', 'LAND', 'LNOT',
'LT', 'LE', 'GT', 'GE', 'EQ', 'NE',
# Assignment
'EQUALS', 'TIMESEQUAL', 'DIVEQUAL', 'MODEQUAL',
'PLUSEQUAL', 'MINUSEQUAL',
'LSHIFTEQUAL','RSHIFTEQUAL', 'ANDEQUAL', 'XOREQUAL',
'OREQUAL',
# Increment/decrement
'PLUSPLUS', 'MINUSMINUS',
# Structure dereference (->)
'ARROW',
# Conditional operator (?)
'CONDOP',
# Delimiters
'LPAREN', 'RPAREN', # ( )
'LBRACKET', 'RBRACKET', # [ ]
'LBRACE', 'RBRACE', # { }
'COMMA', 'PERIOD', # , .
'SEMI', 'COLON', # ; :
# Ellipsis (...)
'ELLIPSIS',
# pre-processor
'PPHASH', # '#'
)
##
## Regexes for use in tokens
##
##
# valid C identifiers (K&R2: A.2.3), plus '$' (supported by some compilers)
identifier = r'[a-zA-Z_$][0-9a-zA-Z_$]*'
hex_prefix = '0[xX]'
hex_digits = '[0-9a-fA-F]+'
bin_prefix = '0[bB]'
bin_digits = '[01]+'
# integer constants (K&R2: A.2.5.1)
integer_suffix_opt = r'(([uU]ll)|([uU]LL)|(ll[uU]?)|(LL[uU]?)|([uU][lL])|([lL][uU]?)|[uU])?'
decimal_constant = '(0'+integer_suffix_opt+')|([1-9][0-9]*'+integer_suffix_opt+')'
octal_constant = '0[0-7]*'+integer_suffix_opt
hex_constant = hex_prefix+hex_digits+integer_suffix_opt
bin_constant = bin_prefix+bin_digits+integer_suffix_opt
bad_octal_constant = '0[0-7]*[89]'
# character constants (K&R2: A.2.5.2)
# Note: a-zA-Z and '.-~^_!=&;,' are allowed as escape chars to support #line
# directives with Windows paths as filenames (..\..\dir\file)
# For the same reason, decimal_escape allows all digit sequences. We want to
# parse all correct code, even if that means sometimes accepting incorrect
# code as well.
#
simple_escape = r"""([a-zA-Z._~!=&\^\-\\?'"])"""
decimal_escape = r"""(\d+)"""
hex_escape = r"""(x[0-9a-fA-F]+)"""
bad_escape = r"""([\\][^a-zA-Z._~^!=&\^\-\\?'"x0-7])"""
escape_sequence = r"""(\\("""+simple_escape+'|'+decimal_escape+'|'+hex_escape+'))'
cconst_char = r"""([^'\\\n]|"""+escape_sequence+')'
char_const = "'"+cconst_char+"'"
wchar_const = 'L'+char_const
unmatched_quote = "('"+cconst_char+"*\\n)|('"+cconst_char+"*$)"
bad_char_const = r"""('"""+cconst_char+"""[^'\n]+')|('')|('"""+bad_escape+r"""[^'\n]*')"""
# string literals (K&R2: A.2.6)
string_char = r"""([^"\\\n]|"""+escape_sequence+')'
string_literal = '"'+string_char+'*"'
wstring_literal = 'L'+string_literal
bad_string_literal = '"'+string_char+'*'+bad_escape+string_char+'*"'
# floating constants (K&R2: A.2.5.3)
exponent_part = r"""([eE][-+]?[0-9]+)"""
fractional_constant = r"""([0-9]*\.[0-9]+)|([0-9]+\.)"""
floating_constant = '(((('+fractional_constant+')'+exponent_part+'?)|([0-9]+'+exponent_part+'))[FfLl]?)'
binary_exponent_part = r'''([pP][+-]?[0-9]+)'''
hex_fractional_constant = '((('+hex_digits+r""")?\."""+hex_digits+')|('+hex_digits+r"""\.))"""
hex_floating_constant = '('+hex_prefix+'('+hex_digits+'|'+hex_fractional_constant+')'+binary_exponent_part+'[FfLl]?)'
##
## Lexer states: used for preprocessor \n-terminated directives
##
states = (
# ppline: preprocessor line directives
#
('ppline', 'exclusive'),
# pppragma: pragma
#
('pppragma', 'exclusive'),
)
def t_PPHASH(self, t):
r'[ \t]*\#'
if self.line_pattern.match(t.lexer.lexdata, pos=t.lexer.lexpos):
t.lexer.begin('ppline')
self.pp_line = self.pp_filename = None
elif self.pragma_pattern.match(t.lexer.lexdata, pos=t.lexer.lexpos):
t.lexer.begin('pppragma')
else:
t.type = 'PPHASH'
return t
##
## Rules for the ppline state
##
@TOKEN(string_literal)
def t_ppline_FILENAME(self, t):
if self.pp_line is None:
self._error('filename before line number in #line', t)
else:
self.pp_filename = t.value.lstrip('"').rstrip('"')
@TOKEN(decimal_constant)
def t_ppline_LINE_NUMBER(self, t):
if self.pp_line is None:
self.pp_line = t.value
else:
# Ignore: GCC's cpp sometimes inserts a numeric flag
# after the file name
pass
def t_ppline_NEWLINE(self, t):
r'\n'
if self.pp_line is None:
self._error('line number missing in #line', t)
else:
self.lexer.lineno = int(self.pp_line)
if self.pp_filename is not None:
self.filename = self.pp_filename
t.lexer.begin('INITIAL')
def t_ppline_PPLINE(self, t):
r'line'
pass
t_ppline_ignore = ' \t'
def t_ppline_error(self, t):
self._error('invalid #line directive', t)
##
## Rules for the pppragma state
##
def t_pppragma_NEWLINE(self, t):
r'\n'
t.lexer.lineno += 1
t.lexer.begin('INITIAL')
def t_pppragma_PPPRAGMA(self, t):
r'pragma'
pass
t_pppragma_ignore = ' \t<>.-{}();=+-*/$%@&^~!?:,0123456789'
@TOKEN(string_literal)
def t_pppragma_STR(self, t): pass
@TOKEN(identifier)
def t_pppragma_ID(self, t): pass
def t_pppragma_error(self, t):
self._error('invalid #pragma directive', t)
##
## Rules for the normal state
##
t_ignore = ' \t'
# Newlines
def t_NEWLINE(self, t):
r'\n+'
t.lexer.lineno += t.value.count("\n")
# Operators
t_PLUS = r'\+'
t_MINUS = r'-'
t_TIMES = r'\*'
t_DIVIDE = r'/'
t_MOD = r'%'
t_OR = r'\|'
t_AND = r'&'
t_NOT = r'~'
t_XOR = r'\^'
t_LSHIFT = r'<<'
t_RSHIFT = r'>>'
t_LOR = r'\|\|'
t_LAND = r'&&'
t_LNOT = r'!'
t_LT = r'<'
t_GT = r'>'
t_LE = r'<='
t_GE = r'>='
t_EQ = r'=='
t_NE = r'!='
# Assignment operators
t_EQUALS = r'='
t_TIMESEQUAL = r'\*='
t_DIVEQUAL = r'/='
t_MODEQUAL = r'%='
t_PLUSEQUAL = r'\+='
t_MINUSEQUAL = r'-='
t_LSHIFTEQUAL = r'<<='
t_RSHIFTEQUAL = r'>>='
t_ANDEQUAL = r'&='
t_OREQUAL = r'\|='
t_XOREQUAL = r'\^='
# Increment/decrement
t_PLUSPLUS = r'\+\+'
t_MINUSMINUS = r'--'
# ->
t_ARROW = r'->'
# ?
t_CONDOP = r'\?'
# Delimiters
t_LPAREN = r'\('
t_RPAREN = r'\)'
t_LBRACKET = r'\['
t_RBRACKET = r'\]'
t_COMMA = r','
t_PERIOD = r'\.'
t_SEMI = r';'
t_COLON = r':'
t_ELLIPSIS = r'\.\.\.'
# Scope delimiters
# To see why on_lbrace_func is needed, consider:
# typedef char TT;
# void foo(int TT) { TT = 10; }
# TT x = 5;
# Outside the function, TT is a typedef, but inside (starting and ending
# with the braces) it's a parameter. The trouble begins with yacc's
# lookahead token. If we open a new scope in brace_open, then TT has
# already been read and incorrectly interpreted as TYPEID. So, we need
# to open and close scopes from within the lexer.
# Similar for the TT immediately outside the end of the function.
#
@TOKEN(r'\{')
def t_LBRACE(self, t):
self.on_lbrace_func()
return t
@TOKEN(r'\}')
def t_RBRACE(self, t):
self.on_rbrace_func()
return t
t_STRING_LITERAL = string_literal
# The following floating and integer constants are defined as
# functions to impose a strict order (otherwise, decimal
# is placed before the others because its regex is longer,
# and this is bad)
#
@TOKEN(floating_constant)
def t_FLOAT_CONST(self, t):
return t
@TOKEN(hex_floating_constant)
def t_HEX_FLOAT_CONST(self, t):
return t
@TOKEN(hex_constant)
def t_INT_CONST_HEX(self, t):
return t
@TOKEN(bin_constant)
def t_INT_CONST_BIN(self, t):
return t
@TOKEN(bad_octal_constant)
def t_BAD_CONST_OCT(self, t):
msg = "Invalid octal constant"
self._error(msg, t)
@TOKEN(octal_constant)
def t_INT_CONST_OCT(self, t):
return t
@TOKEN(decimal_constant)
def t_INT_CONST_DEC(self, t):
return t
# Must come before bad_char_const, to prevent it from
# catching valid char constants as invalid
#
@TOKEN(char_const)
def t_CHAR_CONST(self, t):
return t
@TOKEN(wchar_const)
def t_WCHAR_CONST(self, t):
return t
@TOKEN(unmatched_quote)
def t_UNMATCHED_QUOTE(self, t):
msg = "Unmatched '"
self._error(msg, t)
@TOKEN(bad_char_const)
def t_BAD_CHAR_CONST(self, t):
msg = "Invalid char constant %s" % t.value
self._error(msg, t)
@TOKEN(wstring_literal)
def t_WSTRING_LITERAL(self, t):
return t
# unmatched string literals are caught by the preprocessor
@TOKEN(bad_string_literal)
def t_BAD_STRING_LITERAL(self, t):
msg = "String contains invalid escape code"
self._error(msg, t)
@TOKEN(identifier)
def t_ID(self, t):
t.type = self.keyword_map.get(t.value, "ID")
if t.type == 'ID' and self.type_lookup_func(t.value):
t.type = "TYPEID"
return t
def t_error(self, t):
msg = 'Illegal character %s' % repr(t.value[0])
self._error(msg, t)
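# Minimal usage sketch (illustrative only, not part of pycparser): exercises
# the build()/input()/token() flow described in the class docstring. The
# no-op callbacks and the sample source line are assumptions for
# demonstration.
if __name__ == '__main__':
    def _on_error(msg, line, column):
        sys.stderr.write('lex error at %s:%s: %s\n' % (line, column, msg))

    clex = CLexer(_on_error, lambda: None, lambda: None, lambda name: False)
    clex.build()
    clex.input('int x = 0x1F;')
    tok = clex.token()
    while tok is not None:
        sys.stdout.write('%s %s\n' % (tok.type, tok.value))
        tok = clex.token()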
| apache-2.0 |
carlosfunk/django-environ | docs/conf.py | 4 | 7832 | # -*- coding: utf-8 -*-
#
# Django-environ documentation build configuration file, created by
# sphinx-quickstart on Mon Apr 1 23:01:04 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.dirname(os.path.abspath('.')))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Django-environ'
copyright = u'2013, joke2k'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Django-environdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Django-environ.tex', u'Django-environ Documentation',
u'joke2k', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'django-environ', u'Django-environ Documentation',
[u'joke2k'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Django-environ', u'Django-environ Documentation',
u'joke2k', 'Django-environ', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
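# Illustrative note (not part of the generated configuration): with this file
# saved as docs/conf.py, the HTML documentation is typically built with
#
#   sphinx-build -b html docs docs/_build/html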
| mit |
metiscus/pychess | lib/pychess/System/TaskQueue.py | 22 | 2187 | # http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/475160
# Was accepted into Python 2.5, but earlier versions still have
# to do stuff manually
import threading
from pychess.compat import Queue
def TaskQueue ():
if hasattr(Queue, "task_done"):
return Queue()
return _TaskQueue()
class _TaskQueue(Queue):
def __init__(self):
Queue.__init__(self)
self.all_tasks_done = threading.Condition(self.mutex)
self.unfinished_tasks = 0
def _put(self, item):
Queue._put(self, item)
self.unfinished_tasks += 1
def task_done(self):
"""Indicate that a formerly enqueued task is complete.
Used by Queue consumer threads. For each get() used to fetch a task,
a subsequent call to task_done() tells the queue that the processing
on the task is complete.
If a join() is currently blocking, it will resume when all items
have been processed (meaning that a task_done() call was received
for every item that had been put() into the queue).
Raises a ValueError if called more times than there were items
placed in the queue.
"""
self.all_tasks_done.acquire()
try:
unfinished = self.unfinished_tasks - 1
if unfinished <= 0:
if unfinished < 0:
raise ValueError('task_done() called too many times')
self.all_tasks_done.notifyAll()
self.unfinished_tasks = unfinished
finally:
self.all_tasks_done.release()
def join(self):
"""Blocks until all items in the Queue have been gotten and processed.
The count of unfinished tasks goes up whenever an item is added to the
queue. The count goes down whenever a consumer thread calls task_done()
to indicate the item was retrieved and all work on it is complete.
When the count of unfinished tasks drops to zero, join() unblocks.
"""
self.all_tasks_done.acquire()
try:
while self.unfinished_tasks:
self.all_tasks_done.wait()
finally:
self.all_tasks_done.release()
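# Minimal usage sketch (illustrative, not part of the original module): one
# daemon consumer drained via task_done()/join(); the payloads are
# placeholders.
if __name__ == '__main__':
    queue = TaskQueue()

    def _worker():
        while True:
            item = queue.get()
            # ... process item here ...
            queue.task_done()

    worker = threading.Thread(target=_worker)
    worker.setDaemon(True)
    worker.start()
    for i in range(5):
        queue.put(i)
    queue.join()  # returns once task_done() has been called for every put()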
| gpl-3.0 |
ynov/nwg | deps/boost/tools/build/v2/test/build_file.py | 44 | 5117 | #!/usr/bin/python
# Copyright (C) 2006. Vladimir Prus
# Copyright (C) 2008. Jurko Gospodnetic
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
# Tests that we can explicitly request a file (not a target) to be built by
# specifying its name on the command line.
import BoostBuild
###############################################################################
#
# test_building_file_from_specific_project()
# ------------------------------------------
#
###############################################################################
def test_building_file_from_specific_project():
t = BoostBuild.Tester(use_test_config=False)
t.write("jamroot.jam", """\
exe hello : hello.cpp ;
exe hello2 : hello.cpp ;
build-project sub ;
""")
t.write("hello.cpp", "int main() {}\n")
t.write("sub/jamfile.jam", """
exe hello : hello.cpp ;
exe hello2 : hello.cpp ;
exe sub : hello.cpp ;
""")
t.write("sub/hello.cpp", "int main() {}\n")
t.run_build_system(["sub", t.adjust_suffix("hello.obj")])
t.expect_output_lines("*depends on itself*", False)
t.expect_addition("sub/bin/$toolset/debug/hello.obj")
t.expect_nothing_more()
t.cleanup()
###############################################################################
#
# test_building_file_from_specific_target()
# -----------------------------------------
#
###############################################################################
def test_building_file_from_specific_target():
t = BoostBuild.Tester(use_test_config=False)
t.write("jamroot.jam", """\
exe hello1 : hello1.cpp ;
exe hello2 : hello2.cpp ;
exe hello3 : hello3.cpp ;
""")
t.write("hello1.cpp", "int main() {}\n")
t.write("hello2.cpp", "int main() {}\n")
t.write("hello3.cpp", "int main() {}\n")
t.run_build_system(["hello1", t.adjust_suffix("hello1.obj")])
t.expect_addition("bin/$toolset/debug/hello1.obj")
t.expect_nothing_more()
t.cleanup()
###############################################################################
#
# test_building_missing_file_from_specific_target()
# -------------------------------------------------
#
###############################################################################
def test_building_missing_file_from_specific_target():
t = BoostBuild.Tester(use_test_config=False)
t.write("jamroot.jam", """\
exe hello1 : hello1.cpp ;
exe hello2 : hello2.cpp ;
exe hello3 : hello3.cpp ;
""")
t.write("hello1.cpp", "int main() {}\n")
t.write("hello2.cpp", "int main() {}\n")
t.write("hello3.cpp", "int main() {}\n")
obj = t.adjust_suffix("hello2.obj")
t.run_build_system(["hello1", obj], status=1)
t.expect_output_lines("don't know how to make*" + obj)
t.expect_nothing_more()
t.cleanup()
###############################################################################
#
# test_building_multiple_files_with_different_names()
# ---------------------------------------------------
#
###############################################################################
def test_building_multiple_files_with_different_names():
t = BoostBuild.Tester(use_test_config=False)
t.write("jamroot.jam", """\
exe hello1 : hello1.cpp ;
exe hello2 : hello2.cpp ;
exe hello3 : hello3.cpp ;
""")
t.write("hello1.cpp", "int main() {}\n")
t.write("hello2.cpp", "int main() {}\n")
t.write("hello3.cpp", "int main() {}\n")
t.run_build_system([t.adjust_suffix("hello1.obj"), t.adjust_suffix(
"hello2.obj")])
t.expect_addition("bin/$toolset/debug/hello1.obj")
t.expect_addition("bin/$toolset/debug/hello2.obj")
t.expect_nothing_more()
t.cleanup()
###############################################################################
#
# test_building_multiple_files_with_the_same_name()
# -------------------------------------------------
#
###############################################################################
def test_building_multiple_files_with_the_same_name():
t = BoostBuild.Tester(use_test_config=False)
t.write("jamroot.jam", """\
exe hello : hello.cpp ;
exe hello2 : hello.cpp ;
build-project sub ;
""")
t.write("hello.cpp", "int main() {}\n")
t.write("sub/jamfile.jam", """
exe hello : hello.cpp ;
exe hello2 : hello.cpp ;
exe sub : hello.cpp ;
""")
t.write("sub/hello.cpp", "int main() {}\n")
t.run_build_system([t.adjust_suffix("hello.obj")])
t.expect_output_lines("*depends on itself*", False)
t.expect_addition("bin/$toolset/debug/hello.obj")
t.expect_addition("sub/bin/$toolset/debug/hello.obj")
t.expect_nothing_more()
t.cleanup()
###############################################################################
#
# main()
# ------
#
###############################################################################
test_building_file_from_specific_project()
test_building_file_from_specific_target()
test_building_missing_file_from_specific_target()
test_building_multiple_files_with_different_names()
test_building_multiple_files_with_the_same_name()
| mit |
duanwujie/depot_tools | third_party/logilab/astroid/manager.py | 56 | 15736 | # copyright 2003-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:[email protected]
#
# This file is part of astroid.
#
# astroid is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 2.1 of the License, or (at your
# option) any later version.
#
# astroid is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with astroid. If not, see <http://www.gnu.org/licenses/>.
"""astroid manager: avoid multiple astroid build of a same module when
possible by providing a class responsible to get astroid representation
from various source and using a cache of built modules)
"""
from __future__ import print_function
__docformat__ = "restructuredtext en"
import collections
import imp
import os
from os.path import dirname, join, isdir, exists
from warnings import warn
import zipimport
from logilab.common.configuration import OptionsProviderMixIn
from astroid.exceptions import AstroidBuildingException
from astroid import modutils
def astroid_wrapper(func, modname):
"""wrapper to give to AstroidManager.project_from_files"""
print('parsing %s...' % modname)
try:
return func(modname)
except AstroidBuildingException as exc:
print(exc)
except Exception as exc:
import traceback
traceback.print_exc()
def _silent_no_wrap(func, modname):
"""silent wrapper that doesn't do anything; can be used for tests"""
return func(modname)
def safe_repr(obj):
try:
return repr(obj)
except:
return '???'
class AstroidManager(OptionsProviderMixIn):
"""the astroid manager, responsible to build astroid from files
or modules.
Use the Borg pattern.
"""
name = 'astroid loader'
options = (("ignore",
{'type' : "csv", 'metavar' : "<file>",
'dest' : "black_list", "default" : ('CVS',),
'help' : "add <file> (may be a directory) to the black list\
. It should be a base name, not a path. You may set this option multiple times\
."}),
("project",
{'default': "No Name", 'type' : 'string', 'short': 'p',
'metavar' : '<project name>',
'help' : 'set the project name.'}),
)
brain = {}
def __init__(self):
self.__dict__ = AstroidManager.brain
if not self.__dict__:
OptionsProviderMixIn.__init__(self)
self.load_defaults()
# NOTE: cache entries are added by the [re]builder
self.astroid_cache = {}
self._mod_file_cache = {}
self.transforms = collections.defaultdict(list)
self._failed_import_hooks = []
self.always_load_extensions = False
self.extension_package_whitelist = set()
def ast_from_file(self, filepath, modname=None, fallback=True, source=False):
"""given a module name, return the astroid object"""
try:
filepath = modutils.get_source_file(filepath, include_no_ext=True)
source = True
except modutils.NoSourceFile:
pass
if modname is None:
try:
modname = '.'.join(modutils.modpath_from_file(filepath))
except ImportError:
modname = filepath
if modname in self.astroid_cache and self.astroid_cache[modname].file == filepath:
return self.astroid_cache[modname]
if source:
from astroid.builder import AstroidBuilder
return AstroidBuilder(self).file_build(filepath, modname)
elif fallback and modname:
return self.ast_from_module_name(modname)
raise AstroidBuildingException('unable to get astroid for file %s' %
filepath)
def _build_stub_module(self, modname):
from astroid.builder import AstroidBuilder
return AstroidBuilder(self).string_build('', modname)
def _can_load_extension(self, modname):
if self.always_load_extensions:
return True
if modutils.is_standard_module(modname):
return True
parts = modname.split('.')
return any(
'.'.join(parts[:x]) in self.extension_package_whitelist
for x in range(1, len(parts) + 1))
def ast_from_module_name(self, modname, context_file=None):
"""given a module name, return the astroid object"""
if modname in self.astroid_cache:
return self.astroid_cache[modname]
if modname == '__main__':
return self._build_stub_module(modname)
old_cwd = os.getcwd()
if context_file:
os.chdir(dirname(context_file))
try:
filepath, mp_type = self.file_from_module_name(modname, context_file)
if mp_type == modutils.PY_ZIPMODULE:
module = self.zip_import_data(filepath)
if module is not None:
return module
elif mp_type in (imp.C_BUILTIN, imp.C_EXTENSION):
if mp_type == imp.C_EXTENSION and not self._can_load_extension(modname):
return self._build_stub_module(modname)
try:
module = modutils.load_module_from_name(modname)
except Exception as ex:
msg = 'Unable to load module %s (%s)' % (modname, ex)
raise AstroidBuildingException(msg)
return self.ast_from_module(module, modname)
elif mp_type == imp.PY_COMPILED:
raise AstroidBuildingException("Unable to load compiled module %s" % (modname,))
if filepath is None:
raise AstroidBuildingException("Unable to load module %s" % (modname,))
return self.ast_from_file(filepath, modname, fallback=False)
except AstroidBuildingException as e:
for hook in self._failed_import_hooks:
try:
return hook(modname)
except AstroidBuildingException:
pass
raise e
finally:
os.chdir(old_cwd)
def zip_import_data(self, filepath):
if zipimport is None:
return None
from astroid.builder import AstroidBuilder
builder = AstroidBuilder(self)
for ext in ('.zip', '.egg'):
try:
eggpath, resource = filepath.rsplit(ext + os.path.sep, 1)
except ValueError:
continue
try:
importer = zipimport.zipimporter(eggpath + ext)
zmodname = resource.replace(os.path.sep, '.')
if importer.is_package(resource):
zmodname = zmodname + '.__init__'
module = builder.string_build(importer.get_source(resource),
zmodname, filepath)
return module
except:
continue
return None
def file_from_module_name(self, modname, contextfile):
try:
value = self._mod_file_cache[(modname, contextfile)]
except KeyError:
try:
value = modutils.file_info_from_modpath(
modname.split('.'), context_file=contextfile)
except ImportError as ex:
msg = 'Unable to load module %s (%s)' % (modname, ex)
value = AstroidBuildingException(msg)
self._mod_file_cache[(modname, contextfile)] = value
if isinstance(value, AstroidBuildingException):
raise value
return value
def ast_from_module(self, module, modname=None):
"""given an imported module, return the astroid object"""
modname = modname or module.__name__
if modname in self.astroid_cache:
return self.astroid_cache[modname]
try:
# some builtin modules don't have __file__ attribute
filepath = module.__file__
if modutils.is_python_source(filepath):
return self.ast_from_file(filepath, modname)
except AttributeError:
pass
from astroid.builder import AstroidBuilder
return AstroidBuilder(self).module_build(module, modname)
def ast_from_class(self, klass, modname=None):
"""get astroid for the given class"""
if modname is None:
try:
modname = klass.__module__
except AttributeError:
raise AstroidBuildingException(
'Unable to get module for class %s' % safe_repr(klass))
modastroid = self.ast_from_module_name(modname)
return modastroid.getattr(klass.__name__)[0] # XXX
def infer_ast_from_something(self, obj, context=None):
"""infer astroid for the given class"""
if hasattr(obj, '__class__') and not isinstance(obj, type):
klass = obj.__class__
else:
klass = obj
try:
modname = klass.__module__
except AttributeError:
raise AstroidBuildingException(
'Unable to get module for %s' % safe_repr(klass))
except Exception as ex:
raise AstroidBuildingException(
'Unexpected error while retrieving module for %s: %s'
% (safe_repr(klass), ex))
try:
name = klass.__name__
except AttributeError:
raise AstroidBuildingException(
'Unable to get name for %s' % safe_repr(klass))
except Exception as ex:
raise AstroidBuildingException(
'Unexpected error while retrieving name for %s: %s'
% (safe_repr(klass), ex))
# take care, on living object __module__ is regularly wrong :(
modastroid = self.ast_from_module_name(modname)
if klass is obj:
for infered in modastroid.igetattr(name, context):
yield infered
else:
for infered in modastroid.igetattr(name, context):
yield infered.instanciate_class()
def project_from_files(self, files, func_wrapper=astroid_wrapper,
project_name=None, black_list=None):
"""return a Project from a list of files or modules"""
# build the project representation
project_name = project_name or self.config.project
black_list = black_list or self.config.black_list
project = Project(project_name)
for something in files:
if not exists(something):
fpath = modutils.file_from_modpath(something.split('.'))
elif isdir(something):
fpath = join(something, '__init__.py')
else:
fpath = something
astroid = func_wrapper(self.ast_from_file, fpath)
if astroid is None:
continue
# XXX why does the first file define project.path ?
project.path = project.path or astroid.file
project.add_module(astroid)
base_name = astroid.name
# recurse in package except if __init__ was explicitly given
if astroid.package and something.find('__init__') == -1:
# recurse on others packages / modules if this is a package
for fpath in modutils.get_module_files(dirname(astroid.file),
black_list):
astroid = func_wrapper(self.ast_from_file, fpath)
if astroid is None or astroid.name == base_name:
continue
project.add_module(astroid)
return project
def register_transform(self, node_class, transform, predicate=None):
"""Register `transform(node)` function to be applied on the given
Astroid's `node_class` if `predicate` is None or returns true
when called with the node as argument.
The transform function may return a value which is then used to
substitute the original node in the tree.
"""
self.transforms[node_class].append((transform, predicate))
def unregister_transform(self, node_class, transform, predicate=None):
"""Unregister the given transform."""
self.transforms[node_class].remove((transform, predicate))
def register_failed_import_hook(self, hook):
"""Registers a hook to resolve imports that cannot be found otherwise.
`hook` must be a function that accepts a single argument `modname` which
contains the name of the module or package that could not be imported.
If `hook` can resolve the import, it must return a node of type
`astroid.Module`; otherwise, it must raise `AstroidBuildingException`.
"""
self._failed_import_hooks.append(hook)
def transform(self, node):
"""Call matching transforms for the given node if any and return the
transformed node.
"""
cls = node.__class__
if cls not in self.transforms:
# no transform registered for this class of node
return node
transforms = self.transforms[cls]
orig_node = node # copy the reference
for transform_func, predicate in transforms:
if predicate is None or predicate(node):
ret = transform_func(node)
# if the transformation function returns something, it's
# expected to be a replacement for the node
if ret is not None:
if node is not orig_node:
# node has already been modified by some previous
# transformation, warn about it
warn('node %s substituted multiple times' % node)
node = ret
return node
def cache_module(self, module):
"""Cache a module if no module with the same name is known yet."""
self.astroid_cache.setdefault(module.name, module)
def clear_cache(self, astroid_builtin=None):
# XXX clear transforms
self.astroid_cache.clear()
# force bootstrap again, else we may end up with cache inconsistency
# between the manager and CONST_PROXY, making
# unittest_lookup.LookupTC.test_builtin_lookup fail depending on the
# test order
import astroid.raw_building
astroid.raw_building._astroid_bootstrapping(
astroid_builtin=astroid_builtin)
class Project(object):
"""a project handle a set of modules / packages"""
def __init__(self, name=''):
self.name = name
self.path = None
self.modules = []
self.locals = {}
self.__getitem__ = self.locals.__getitem__
self.__iter__ = self.locals.__iter__
self.values = self.locals.values
self.keys = self.locals.keys
self.items = self.locals.items
def add_module(self, node):
self.locals[node.name] = node
self.modules.append(node)
def get_module(self, name):
return self.locals[name]
def get_children(self):
return self.modules
def __repr__(self):
return '<Project %r at %s (%s modules)>' % (self.name, id(self),
len(self.modules))
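# Illustrative sketch (not part of astroid): registering a transform on the
# Borg manager defined above. The node class, predicate and mutation are
# assumptions for demonstration; requires astroid to be importable.
if __name__ == '__main__':
    from astroid import nodes

    manager = AstroidManager()  # Borg: state is shared with other instances

    def _tag_doc(node):
        # mutate in place; returning None keeps the same node in the tree
        node.doc = '[seen by transform]'

    manager.register_transform(nodes.Function, _tag_doc,
                               predicate=lambda n: n.doc is None)
    module = manager.ast_from_module_name('os.path')
    print(module.name)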
| bsd-3-clause |
franekp/ankidict | ankidict/thirdparty/bs4/__init__.py | 2 | 18267 | """Beautiful Soup
Elixir and Tonic
"The Screen-Scraper's Friend"
http://www.crummy.com/software/BeautifulSoup/
Beautiful Soup uses a pluggable XML or HTML parser to parse a
(possibly invalid) document into a tree representation. Beautiful Soup
provides provides methods and Pythonic idioms that make it easy to
navigate, search, and modify the parse tree.
Beautiful Soup works with Python 2.6 and up. It works better if lxml
and/or html5lib is installed.
For more than you ever wanted to know about Beautiful Soup, see the
documentation:
http://www.crummy.com/software/BeautifulSoup/bs4/doc/
"""
__author__ = "Leonard Richardson ([email protected])"
__version__ = "4.4.0"
__copyright__ = "Copyright (c) 2004-2015 Leonard Richardson"
__license__ = "MIT"
__all__ = ['BeautifulSoup']
import os
import re
import warnings
from .builder import builder_registry, ParserRejectedMarkup, _htmlparser
from .dammit import UnicodeDammit
from .element import (
CData,
Comment,
DEFAULT_OUTPUT_ENCODING,
Declaration,
Doctype,
NavigableString,
PageElement,
ProcessingInstruction,
ResultSet,
SoupStrainer,
Tag,
)
# The very first thing we do is give a useful error if someone is
# running this code under Python 3 without converting it.
'You are trying to run the Python 2 version of Beautiful Soup under Python 3. This will not work.'<>'You need to convert the code, either by installing it (`python setup.py install`) or by running 2to3 (`2to3 -w bs4`).'
class BeautifulSoup(Tag):
"""
This class defines the basic interface called by the tree builders.
These methods will be called by the parser:
reset()
feed(markup)
The tree builder may call these methods from its feed() implementation:
handle_starttag(name, attrs) # See note about return value
handle_endtag(name)
handle_data(data) # Appends to the current data node
endData(containerClass=NavigableString) # Ends the current data node
No matter how complicated the underlying parser is, you should be
able to build a tree using 'start tag' events, 'end tag' events,
'data' events, and "done with data" events.
If you encounter an empty-element tag (aka a self-closing tag,
like HTML's <br> tag), call handle_starttag and then
handle_endtag.
"""
ROOT_TAG_NAME = u'[document]'
# If the end-user gives no indication which tree builder they
# want, look for one with these features.
DEFAULT_BUILDER_FEATURES = ['html']
ASCII_SPACES = '\x20\x0a\x09\x0c\x0d'
NO_PARSER_SPECIFIED_WARNING = "No parser was explicitly specified, so I'm using the best available %(markup_type)s parser for this system (\"%(parser)s\"). This usually isn't a problem, but if you run this code on another system, or in a different virtual environment, it may use a different parser and behave differently.\n\nTo get rid of this warning, change this:\n\n BeautifulSoup([your markup])\n\nto this:\n\n BeautifulSoup([your markup], \"%(parser)s\")\n"
def __init__(self, markup="", features=None, builder=None,
parse_only=None, from_encoding=None, exclude_encodings=None,
**kwargs):
"""The Soup object is initialized as the 'root tag', and the
provided markup (which can be a string or a file-like object)
is fed into the underlying parser."""
if 'convertEntities' in kwargs:
warnings.warn(
"BS4 does not respect the convertEntities argument to the "
"BeautifulSoup constructor. Entities are always converted "
"to Unicode characters.")
if 'markupMassage' in kwargs:
del kwargs['markupMassage']
warnings.warn(
"BS4 does not respect the markupMassage argument to the "
"BeautifulSoup constructor. The tree builder is responsible "
"for any necessary markup massage.")
if 'smartQuotesTo' in kwargs:
del kwargs['smartQuotesTo']
warnings.warn(
"BS4 does not respect the smartQuotesTo argument to the "
"BeautifulSoup constructor. Smart quotes are always converted "
"to Unicode characters.")
if 'selfClosingTags' in kwargs:
del kwargs['selfClosingTags']
warnings.warn(
"BS4 does not respect the selfClosingTags argument to the "
"BeautifulSoup constructor. The tree builder is responsible "
"for understanding self-closing tags.")
if 'isHTML' in kwargs:
del kwargs['isHTML']
warnings.warn(
"BS4 does not respect the isHTML argument to the "
"BeautifulSoup constructor. Suggest you use "
"features='lxml' for HTML and features='lxml-xml' for "
"XML.")
def deprecated_argument(old_name, new_name):
if old_name in kwargs:
warnings.warn(
'The "%s" argument to the BeautifulSoup constructor '
'has been renamed to "%s."' % (old_name, new_name))
value = kwargs[old_name]
del kwargs[old_name]
return value
return None
parse_only = parse_only or deprecated_argument(
"parseOnlyThese", "parse_only")
from_encoding = from_encoding or deprecated_argument(
"fromEncoding", "from_encoding")
if len(kwargs) > 0:
arg = kwargs.keys().pop()
raise TypeError(
"__init__() got an unexpected keyword argument '%s'" % arg)
if builder is None:
original_features = features
if isinstance(features, basestring):
features = [features]
if features is None or len(features) == 0:
features = self.DEFAULT_BUILDER_FEATURES
builder_class = _htmlparser.HTMLParserTreeBuilder
if builder_class is None:
raise FeatureNotFound(
"Couldn't find a tree builder with the features you "
"requested: %s. Do you need to install a parser library?"
% ",".join(features))
builder = builder_class()
if not (original_features == builder.NAME or
original_features in builder.ALTERNATE_NAMES):
if builder.is_xml:
markup_type = "XML"
else:
markup_type = "HTML"
warnings.warn(self.NO_PARSER_SPECIFIED_WARNING % dict(
parser=builder.NAME,
markup_type=markup_type))
self.builder = builder
self.is_xml = builder.is_xml
self.builder.soup = self
self.parse_only = parse_only
if hasattr(markup, 'read'): # It's a file-type object.
markup = markup.read()
elif len(markup) <= 256:
# Print out warnings for a couple beginner problems
# involving passing non-markup to Beautiful Soup.
# Beautiful Soup will still parse the input as markup,
# just in case that's what the user really wants.
if (isinstance(markup, unicode)
and not os.path.supports_unicode_filenames):
possible_filename = markup.encode("utf8")
else:
possible_filename = markup
is_file = False
try:
is_file = os.path.exists(possible_filename)
except Exception, e:
# This is almost certainly a problem involving
# characters not valid in filenames on this
# system. Just let it go.
pass
if is_file:
if isinstance(markup, unicode):
markup = markup.encode("utf8")
warnings.warn(
'"%s" looks like a filename, not markup. You should probably open this file and pass the filehandle into Beautiful Soup.' % markup)
if markup[:5] == "http:" or markup[:6] == "https:":
# TODO: This is ugly but I couldn't get it to work in
# Python 3 otherwise.
if ((isinstance(markup, bytes) and not b' ' in markup)
or (isinstance(markup, unicode) and not u' ' in markup)):
if isinstance(markup, unicode):
markup = markup.encode("utf8")
warnings.warn(
'"%s" looks like a URL. Beautiful Soup is not an HTTP client. You should probably use an HTTP client to get the document behind the URL, and feed that document to Beautiful Soup.' % markup)
for (self.markup, self.original_encoding, self.declared_html_encoding,
self.contains_replacement_characters) in (
self.builder.prepare_markup(
markup, from_encoding, exclude_encodings=exclude_encodings)):
self.reset()
try:
self._feed()
break
except ParserRejectedMarkup:
pass
# Clear out the markup and remove the builder's circular
# reference to this object.
self.markup = None
self.builder.soup = None
def __copy__(self):
return type(self)(self.encode(), builder=self.builder)
def __getstate__(self):
# Frequently a tree builder can't be pickled.
d = dict(self.__dict__)
if 'builder' in d and not self.builder.picklable:
del d['builder']
return d
def _feed(self):
# Convert the document to Unicode.
self.builder.reset()
self.builder.feed(self.markup)
# Close out any unfinished strings and close all the open tags.
self.endData()
while self.currentTag.name != self.ROOT_TAG_NAME:
self.popTag()
def reset(self):
Tag.__init__(self, self, self.builder, self.ROOT_TAG_NAME)
self.hidden = 1
self.builder.reset()
self.current_data = []
self.currentTag = None
self.tagStack = []
self.preserve_whitespace_tag_stack = []
self.pushTag(self)
def new_tag(self, name, namespace=None, nsprefix=None, **attrs):
"""Create a new tag associated with this soup."""
return Tag(None, self.builder, name, namespace, nsprefix, attrs)
def new_string(self, s, subclass=NavigableString):
"""Create a new NavigableString associated with this soup."""
return subclass(s)
def insert_before(self, successor):
raise NotImplementedError("BeautifulSoup objects don't support insert_before().")
def insert_after(self, successor):
raise NotImplementedError("BeautifulSoup objects don't support insert_after().")
def popTag(self):
tag = self.tagStack.pop()
if self.preserve_whitespace_tag_stack and tag == self.preserve_whitespace_tag_stack[-1]:
self.preserve_whitespace_tag_stack.pop()
#print "Pop", tag.name
if self.tagStack:
self.currentTag = self.tagStack[-1]
return self.currentTag
def pushTag(self, tag):
#print "Push", tag.name
if self.currentTag:
self.currentTag.contents.append(tag)
self.tagStack.append(tag)
self.currentTag = self.tagStack[-1]
if tag.name in self.builder.preserve_whitespace_tags:
self.preserve_whitespace_tag_stack.append(tag)
def endData(self, containerClass=NavigableString):
if self.current_data:
current_data = u''.join(self.current_data)
# If whitespace is not preserved, and this string contains
# nothing but ASCII spaces, replace it with a single space
# or newline.
if not self.preserve_whitespace_tag_stack:
strippable = True
for i in current_data:
if i not in self.ASCII_SPACES:
strippable = False
break
if strippable:
if '\n' in current_data:
current_data = '\n'
else:
current_data = ' '
# Reset the data collector.
self.current_data = []
# Should we add this string to the tree at all?
if self.parse_only and len(self.tagStack) <= 1 and \
(not self.parse_only.text or \
not self.parse_only.search(current_data)):
return
o = containerClass(current_data)
self.object_was_parsed(o)
def object_was_parsed(self, o, parent=None, most_recent_element=None):
"""Add an object to the parse tree."""
parent = parent or self.currentTag
previous_element = most_recent_element or self._most_recent_element
next_element = previous_sibling = next_sibling = None
if isinstance(o, Tag):
next_element = o.next_element
next_sibling = o.next_sibling
previous_sibling = o.previous_sibling
if not previous_element:
previous_element = o.previous_element
o.setup(parent, previous_element, next_element, previous_sibling, next_sibling)
self._most_recent_element = o
parent.contents.append(o)
if parent.next_sibling:
# This node is being inserted into an element that has
# already been parsed. Deal with any dangling references.
index = parent.contents.index(o)
if index == 0:
previous_element = parent
previous_sibling = None
else:
previous_element = previous_sibling = parent.contents[index-1]
if index == len(parent.contents)-1:
next_element = parent.next_sibling
next_sibling = None
else:
next_element = next_sibling = parent.contents[index+1]
o.previous_element = previous_element
if previous_element:
previous_element.next_element = o
o.next_element = next_element
if next_element:
next_element.previous_element = o
o.next_sibling = next_sibling
if next_sibling:
next_sibling.previous_sibling = o
o.previous_sibling = previous_sibling
if previous_sibling:
previous_sibling.next_sibling = o
def _popToTag(self, name, nsprefix=None, inclusivePop=True):
"""Pops the tag stack up to and including the most recent
instance of the given tag. If inclusivePop is false, pops the tag
stack up to but *not* including the most recent instance of
the given tag."""
#print "Popping to %s" % name
if name == self.ROOT_TAG_NAME:
# The BeautifulSoup object itself can never be popped.
return
most_recently_popped = None
stack_size = len(self.tagStack)
for i in range(stack_size - 1, 0, -1):
t = self.tagStack[i]
if (name == t.name and nsprefix == t.prefix):
if inclusivePop:
most_recently_popped = self.popTag()
break
most_recently_popped = self.popTag()
return most_recently_popped
def handle_starttag(self, name, namespace, nsprefix, attrs):
"""Push a start tag on to the stack.
If this method returns None, the tag was rejected by the
SoupStrainer. You should proceed as if the tag had not occurred
in the document. For instance, if this was a self-closing tag,
don't call handle_endtag.
"""
# print "Start tag %s: %s" % (name, attrs)
self.endData()
if (self.parse_only and len(self.tagStack) <= 1
and (self.parse_only.text
or not self.parse_only.search_tag(name, attrs))):
return None
tag = Tag(self, self.builder, name, namespace, nsprefix, attrs,
self.currentTag, self._most_recent_element)
if tag is None:
return tag
if self._most_recent_element:
self._most_recent_element.next_element = tag
self._most_recent_element = tag
self.pushTag(tag)
return tag
def handle_endtag(self, name, nsprefix=None):
#print "End tag: " + name
self.endData()
self._popToTag(name, nsprefix)
def handle_data(self, data):
self.current_data.append(data)
def decode(self, pretty_print=False,
eventual_encoding=DEFAULT_OUTPUT_ENCODING,
formatter="minimal"):
"""Returns a string or Unicode representation of this document.
To get Unicode, pass None for encoding."""
if self.is_xml:
# Print the XML declaration
encoding_part = ''
if eventual_encoding != None:
encoding_part = ' encoding="%s"' % eventual_encoding
prefix = u'<?xml version="1.0"%s?>\n' % encoding_part
else:
prefix = u''
if not pretty_print:
indent_level = None
else:
indent_level = 0
return prefix + super(BeautifulSoup, self).decode(
indent_level, eventual_encoding, formatter)
# Alias to make it easier to type import: 'from bs4 import _soup'
_s = BeautifulSoup
_soup = BeautifulSoup
class BeautifulStoneSoup(BeautifulSoup):
"""Deprecated interface to an XML parser."""
def __init__(self, *args, **kwargs):
kwargs['features'] = 'xml'
warnings.warn(
'The BeautifulStoneSoup class is deprecated. Instead of using '
'it, pass features="xml" into the BeautifulSoup constructor.')
super(BeautifulStoneSoup, self).__init__(*args, **kwargs)
class StopParsing(Exception):
pass
class FeatureNotFound(ValueError):
pass
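# Illustrative sketch (not part of the original module): typical use of the
# tree-building and navigation API defined above. The markup and tag names
# are assumptions for demonstration.
def _usage_example():
    soup = BeautifulSoup('<p class="a">one <b>two</b></p>', 'html.parser')
    tag = soup.new_tag('i')
    tag.string = 'three'
    soup.p.append(tag)
    return soup.decode(pretty_print=True)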
#By default, act as an HTML pretty-printer.
if __name__ == '__main__':
import sys
soup = BeautifulSoup(sys.stdin)
print soup.prettify()
| agpl-3.0 |
dcherian/pyroms | examples/Beaufort/make_weight_files.py | 1 | 1429 | import pyroms
# Part of Arctic2 grid containing the Beaufort
irange=(420,580)
jrange=(470,570)
#irange=None
#jrange=None
srcgrd = pyroms.grid.get_ROMS_grid('ARCTIC2')
dstgrd = pyroms.grid.get_ROMS_grid('BEAUFORT')
pyroms.remapping.make_remap_grid_file(srcgrd,irange=irange,jrange=jrange)
pyroms.remapping.make_remap_grid_file(srcgrd,Cpos='u',irange=irange,jrange=jrange)
pyroms.remapping.make_remap_grid_file(srcgrd,Cpos='v',irange=irange,jrange=jrange)
pyroms.remapping.make_remap_grid_file(dstgrd)
pyroms.remapping.make_remap_grid_file(dstgrd,Cpos='u')
pyroms.remapping.make_remap_grid_file(dstgrd,Cpos='v')
type = ['rho','u','v']
for typ in type:
for tip in type:
grid1_file = 'remap_grid_ARCTIC2_'+str(typ)+'.nc'
grid2_file = 'remap_grid_BEAUFORT_'+str(tip)+'.nc'
interp_file1 = 'remap_weights_ARCTIC2_to_BEAUFORT_bilinear_'+str(typ)+'_to_'+str(tip)+'.nc'
interp_file2 = 'remap_weights_BEAUFORT_to_ARCTIC2_bilinear_'+str(tip)+'_to_'+str(typ)+'.nc'
map1_name = 'ARCTIC2 to BEAUFORT Bilinear Mapping'
map2_name = 'BEAUFORT to ARCTIC2 Bilinear Mapping'
num_maps = 1
map_method = 'bilinear'
print "Making "+str(interp_file1)+"..."
pyroms.remapping.compute_remap_weights(grid1_file,grid2_file,\
interp_file1,interp_file2,map1_name,\
map2_name,num_maps,map_method)
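# Note (illustrative, not part of the script): compute_remap_weights writes
# SCRIP-style interpolation weights, so the remap_weights_*.nc files produced
# above can later be consumed by pyroms' remapping routines when regridding
# fields between the ARCTIC2 and BEAUFORT grids.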
| bsd-3-clause |
Richard-West/RootTheBox | handlers/ErrorHandlers.py | 6 | 2716 | # -*- coding: utf-8 -*-
'''
Created on Mar 13, 2012
@author: moloch
Copyright 2012 Root the Box
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import logging
from handlers.BaseHandlers import BaseHandler
class NotFoundHandler(BaseHandler):
def get(self, *args, **kwargs):
''' Renders the 404 page '''
self.render("public/404.html")
def post(self, *args, **kwargs):
''' Renders the 404 page '''
self.render("public/404.html")
def put(self, *args, **kwargs):
''' Log odd behavior, this should never get legitimately called '''
logging.warn("%s attempted to use PUT method" % self.request.remote_ip)
self.render("public/404.html")
def delete(self, *args, **kwargs):
''' Log odd behavior, this should never get legitimately called '''
logging.warn(
"%s attempted to use DELETE method" % self.request.remote_ip)
self.render("public/404.html")
def head(self, *args, **kwargs):
''' Log odd behavior, this should never get legitimately called '''
logging.warn(
"%s attempted to use HEAD method" % self.request.remote_ip)
self.render("public/404.html")
def options(self, *args, **kwargs):
''' Log odd behavior, this should never get legitimately called '''
logging.warn(
"%s attempted to use OPTIONS method" % self.request.remote_ip)
self.render("public/404.html")
class UnauthorizedHandler(BaseHandler):
def get(self, *args, **kwargs):
''' Renders the 403 page '''
self.clear_content_policy('object')
self.add_content_policy('object', "'self'")
locked = self.get_argument('locked', '').lower() == 'true'
self.render("public/403.html", locked=locked, xsrf=False)
class NoobHandler(BaseHandler):
def get(self, *args, **kwargs):
''' Renders the noob page '''
if self.session is not None:
user = self.get_current_user()
logging.info("[NOOB ALERT] %s made a silly request, please mock him (%s)" % (
user.handle, self.request.remote_ip
))
self.render("public/noob.html")
| apache-2.0 |
owenmorris/pylucene | test/test_FilteredQuery.py | 3 | 4823 | # ====================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ====================================================================
import sys, lucene, unittest
from PyLuceneTestCase import PyLuceneTestCase
from java.util import BitSet
from org.apache.lucene.analysis.core import WhitespaceAnalyzer
from org.apache.lucene.document import Document, Field, TextField
from org.apache.lucene.index import Term
from org.apache.lucene.search import \
FilteredQuery, Sort, SortField, TermRangeQuery, TermQuery
from org.apache.lucene.util import Bits, DocIdBitSet, Version
from org.apache.pylucene.search import PythonFilter
class FilteredQueryTestCase(PyLuceneTestCase):
"""
Unit tests ported from Java Lucene
"""
def setUp(self):
super(FilteredQueryTestCase, self).setUp()
writer = self.getWriter(analyzer=WhitespaceAnalyzer(Version.LUCENE_CURRENT))
doc = Document()
doc.add(Field("field", "one two three four five", TextField.TYPE_STORED))
doc.add(Field("sorter", "b", TextField.TYPE_STORED))
writer.addDocument(doc)
doc = Document()
doc.add(Field("field", "one two three four", TextField.TYPE_STORED))
doc.add(Field("sorter", "d", TextField.TYPE_STORED))
writer.addDocument(doc)
doc = Document()
doc.add(Field("field", "one two three y", TextField.TYPE_STORED))
doc.add(Field("sorter", "a", TextField.TYPE_STORED))
writer.addDocument(doc)
doc = Document()
doc.add(Field("field", "one two x", TextField.TYPE_STORED))
doc.add(Field("sorter", "c", TextField.TYPE_STORED))
writer.addDocument(doc)
writer.commit()
writer.close()
self.searcher = self.getSearcher()
self.query = TermQuery(Term("field", "three"))
class filter(PythonFilter):
def getDocIdSet(self, context, acceptDocs):
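# Keep only documents 1 and 3 (of the four indexed in setUp), while still
# honoring deletions/filters handed in through acceptDocs.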
if acceptDocs is None:
acceptDocs = Bits.MatchAllBits(5)
bitset = BitSet(5)
if acceptDocs.get(1):
bitset.set(1)
if acceptDocs.get(3):
bitset.set(3)
return DocIdBitSet(bitset)
self.filter = filter()
def testFilteredQuery(self):
filteredquery = FilteredQuery(self.query, self.filter)
topDocs = self.searcher.search(filteredquery, 50)
self.assertEqual(1, topDocs.totalHits)
self.assertEqual(1, topDocs.scoreDocs[0].doc)
topDocs = self.searcher.search(filteredquery, None, 50,
Sort(SortField("sorter",
SortField.Type.STRING)))
self.assertEqual(1, topDocs.totalHits)
self.assertEqual(1, topDocs.scoreDocs[0].doc)
filteredquery = FilteredQuery(TermQuery(Term("field", "one")),
self.filter)
topDocs = self.searcher.search(filteredquery, 50)
self.assertEqual(2, topDocs.totalHits)
filteredquery = FilteredQuery(TermQuery(Term("field", "x")),
self.filter)
topDocs = self.searcher.search(filteredquery, 50)
self.assertEqual(1, topDocs.totalHits)
self.assertEqual(3, topDocs.scoreDocs[0].doc)
filteredquery = FilteredQuery(TermQuery(Term("field", "y")),
self.filter)
topDocs = self.searcher.search(filteredquery, 50)
self.assertEqual(0, topDocs.totalHits)
def testRangeQuery(self):
"""
This tests FilteredQuery's rewrite correctness
"""
rq = TermRangeQuery.newStringRange("sorter", "b", "d", True, True)
filteredquery = FilteredQuery(rq, self.filter)
scoreDocs = self.searcher.search(filteredquery, None, 1000).scoreDocs
self.assertEqual(2, len(scoreDocs))
if __name__ == "__main__":
lucene.initVM(vmargs=['-Djava.awt.headless=true'])
if '-loop' in sys.argv:
sys.argv.remove('-loop')
while True:
try:
unittest.main()
except:
pass
else:
unittest.main()
| apache-2.0 |
richardmcc/TREC-IS | Interfaces/InterfaceV0.5/project/target/node-modules/webjars/npm/node_modules/node-gyp/gyp/pylib/gyp/MSVSUtil.py | 566 | 9386 | # Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility functions shared amongst the Windows generators."""
import copy
import os
_TARGET_TYPE_EXT = {
'executable': '.exe',
'loadable_module': '.dll',
'shared_library': '.dll',
}
def _GetLargePdbShimCcPath():
"""Returns the path of the large_pdb_shim.cc file."""
this_dir = os.path.abspath(os.path.dirname(__file__))
src_dir = os.path.abspath(os.path.join(this_dir, '..', '..'))
win_data_dir = os.path.join(src_dir, 'data', 'win')
large_pdb_shim_cc = os.path.join(win_data_dir, 'large-pdb-shim.cc')
return large_pdb_shim_cc
def _DeepCopySomeKeys(in_dict, keys):
"""Performs a partial deep-copy on |in_dict|, only copying the keys in |keys|.
Arguments:
in_dict: The dictionary to copy.
keys: The keys to be copied. If a key is in this list and doesn't exist in
|in_dict| this is not an error.
Returns:
The partially deep-copied dictionary.
"""
d = {}
for key in keys:
if key not in in_dict:
continue
d[key] = copy.deepcopy(in_dict[key])
return d
def _SuffixName(name, suffix):
"""Add a suffix to the end of a target.
Arguments:
name: name of the target (foo#target)
suffix: the suffix to be added
Returns:
Target name with suffix added (foo_suffix#target)
"""
parts = name.rsplit('#', 1)
parts[0] = '%s_%s' % (parts[0], suffix)
return '#'.join(parts)
def _ShardName(name, number):
"""Add a shard number to the end of a target.
Arguments:
name: name of the target (foo#target)
number: shard number
Returns:
Target name with shard added (foo_1#target)
"""
return _SuffixName(name, str(number))
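# Illustrative behavior, derived from the docstrings above:
#   _SuffixName('base/base.gyp:base#target', 'copy') -> 'base/base.gyp:base_copy#target'
#   _ShardName('base/base.gyp:base#target', 1)       -> 'base/base.gyp:base_1#target'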
def ShardTargets(target_list, target_dicts):
"""Shard some targets apart to work around the linkers limits.
Arguments:
target_list: List of target pairs: 'base/base.gyp:base'.
target_dicts: Dict of target properties keyed on target pair.
Returns:
Tuple of the new sharded versions of the inputs.
"""
# Gather the targets to shard, and how many pieces.
targets_to_shard = {}
for t in target_dicts:
shards = int(target_dicts[t].get('msvs_shard', 0))
if shards:
targets_to_shard[t] = shards
# Shard target_list.
new_target_list = []
for t in target_list:
if t in targets_to_shard:
for i in range(targets_to_shard[t]):
new_target_list.append(_ShardName(t, i))
else:
new_target_list.append(t)
# Shard target_dict.
new_target_dicts = {}
for t in target_dicts:
if t in targets_to_shard:
for i in range(targets_to_shard[t]):
name = _ShardName(t, i)
new_target_dicts[name] = copy.copy(target_dicts[t])
new_target_dicts[name]['target_name'] = _ShardName(
new_target_dicts[name]['target_name'], i)
sources = new_target_dicts[name].get('sources', [])
new_sources = []
for pos in range(i, len(sources), targets_to_shard[t]):
new_sources.append(sources[pos])
new_target_dicts[name]['sources'] = new_sources
else:
new_target_dicts[t] = target_dicts[t]
# Shard dependencies.
for t in new_target_dicts:
dependencies = copy.copy(new_target_dicts[t].get('dependencies', []))
new_dependencies = []
for d in dependencies:
if d in targets_to_shard:
for i in range(targets_to_shard[d]):
new_dependencies.append(_ShardName(d, i))
else:
new_dependencies.append(d)
new_target_dicts[t]['dependencies'] = new_dependencies
return (new_target_list, new_target_dicts)
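# A minimal round-trip sketch with hypothetical inputs: given
#   target_dicts = {'base/base.gyp:base#target':
#                   {'target_name': 'base', 'msvs_shard': 2,
#                    'sources': ['a.cc', 'b.cc', 'c.cc']}}
# ShardTargets() yields two sharded targets (..._0 and ..._1), with sources
# split round-robin as ['a.cc', 'c.cc'] and ['b.cc'].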
def _GetPdbPath(target_dict, config_name, vars):
"""Returns the path to the PDB file that will be generated by a given
configuration.
The lookup proceeds as follows:
- Look for an explicit path in the VCLinkerTool configuration block.
- Look for an 'msvs_large_pdb_path' variable.
- Use '<(PRODUCT_DIR)/<(product_name).(exe|dll).pdb' if 'product_name' is
specified.
- Use '<(PRODUCT_DIR)/<(target_name).(exe|dll).pdb'.
Arguments:
target_dict: The target dictionary to be searched.
config_name: The name of the configuration of interest.
vars: A dictionary of common GYP variables with generator-specific values.
Returns:
The path of the corresponding PDB file.
"""
config = target_dict['configurations'][config_name]
msvs = config.setdefault('msvs_settings', {})
linker = msvs.get('VCLinkerTool', {})
pdb_path = linker.get('ProgramDatabaseFile')
if pdb_path:
return pdb_path
variables = target_dict.get('variables', {})
pdb_path = variables.get('msvs_large_pdb_path', None)
if pdb_path:
return pdb_path
pdb_base = target_dict.get('product_name', target_dict['target_name'])
pdb_base = '%s%s.pdb' % (pdb_base, _TARGET_TYPE_EXT[target_dict['type']])
pdb_path = vars['PRODUCT_DIR'] + '/' + pdb_base
return pdb_path
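# Example of the final fallback: with vars['PRODUCT_DIR'] == 'out/Release',
# target_name 'foo' and type 'executable', the result is
# 'out/Release/foo.exe.pdb'.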
def InsertLargePdbShims(target_list, target_dicts, vars):
"""Insert a shim target that forces the linker to use 4KB pagesize PDBs.
This is a workaround for targets with PDBs greater than 1GB in size, the
limit for the 1KB pagesize PDBs created by the linker by default.
Arguments:
target_list: List of target pairs: 'base/base.gyp:base'.
target_dicts: Dict of target properties keyed on target pair.
vars: A dictionary of common GYP variables with generator-specific values.
Returns:
Tuple of the shimmed version of the inputs.
"""
# Determine which targets need shimming.
targets_to_shim = []
for t in target_dicts:
target_dict = target_dicts[t]
# We only want to shim targets that have msvs_large_pdb enabled.
if not int(target_dict.get('msvs_large_pdb', 0)):
continue
# This is intended for executable, shared_library and loadable_module
# targets where every configuration is set up to produce a PDB output.
# If any of these conditions is not true then the shim logic will fail
# below.
targets_to_shim.append(t)
large_pdb_shim_cc = _GetLargePdbShimCcPath()
for t in targets_to_shim:
target_dict = target_dicts[t]
target_name = target_dict.get('target_name')
base_dict = _DeepCopySomeKeys(target_dict,
['configurations', 'default_configuration', 'toolset'])
# This is the dict for copying the source file (part of the GYP tree)
# to the intermediate directory of the project. This is necessary because
# we can't always build a relative path to the shim source file (on Windows
# GYP and the project may be on different drives), and Ninja hates absolute
# paths (it ends up generating the .obj and .obj.d alongside the source
# file, polluting GYPs tree).
copy_suffix = 'large_pdb_copy'
copy_target_name = target_name + '_' + copy_suffix
full_copy_target_name = _SuffixName(t, copy_suffix)
shim_cc_basename = os.path.basename(large_pdb_shim_cc)
shim_cc_dir = vars['SHARED_INTERMEDIATE_DIR'] + '/' + copy_target_name
shim_cc_path = shim_cc_dir + '/' + shim_cc_basename
copy_dict = copy.deepcopy(base_dict)
copy_dict['target_name'] = copy_target_name
copy_dict['type'] = 'none'
copy_dict['sources'] = [ large_pdb_shim_cc ]
copy_dict['copies'] = [{
'destination': shim_cc_dir,
'files': [ large_pdb_shim_cc ]
}]
# This is the dict for the PDB generating shim target. It depends on the
# copy target.
shim_suffix = 'large_pdb_shim'
shim_target_name = target_name + '_' + shim_suffix
full_shim_target_name = _SuffixName(t, shim_suffix)
shim_dict = copy.deepcopy(base_dict)
shim_dict['target_name'] = shim_target_name
shim_dict['type'] = 'static_library'
shim_dict['sources'] = [ shim_cc_path ]
shim_dict['dependencies'] = [ full_copy_target_name ]
# Set up the shim to output its PDB to the same location as the final linker
# target.
for config_name, config in shim_dict.get('configurations').iteritems():
pdb_path = _GetPdbPath(target_dict, config_name, vars)
# A few keys that we don't want to propagate.
for key in ['msvs_precompiled_header', 'msvs_precompiled_source', 'test']:
config.pop(key, None)
msvs = config.setdefault('msvs_settings', {})
# Update the compiler directives in the shim target.
compiler = msvs.setdefault('VCCLCompilerTool', {})
compiler['DebugInformationFormat'] = '3'
compiler['ProgramDataBaseFileName'] = pdb_path
# Set the explicit PDB path in the appropriate configuration of the
# original target.
config = target_dict['configurations'][config_name]
msvs = config.setdefault('msvs_settings', {})
linker = msvs.setdefault('VCLinkerTool', {})
linker['GenerateDebugInformation'] = 'true'
linker['ProgramDatabaseFile'] = pdb_path
# Add the new targets. They must go to the beginning of the list so that
# the dependency generation works as expected in ninja.
target_list.insert(0, full_copy_target_name)
target_list.insert(0, full_shim_target_name)
target_dicts[full_copy_target_name] = copy_dict
target_dicts[full_shim_target_name] = shim_dict
# Update the original target to depend on the shim target.
target_dict.setdefault('dependencies', []).append(full_shim_target_name)
return (target_list, target_dicts)
| apache-2.0 |
sontek/rethinkdb | test/common/test_report.py | 41 | 4853 | # Copyright 2010-2014 RethinkDB, all rights reserved.
from __future__ import print_function
import json, os, re, subprocess
import utils
def format_tests(test_root, test_tree):
tests = []
for name, test in test_tree:
command_line = test.read_file('description')
failed = test.read_file('fail_message')
if failed is None:
status = 'pass'
else:
status = 'fail'
file_infos = []
for rel_path in test.list_files(text_only=False):
path = os.path.join(test_root, name, rel_path)
file_info = { 'name': os.path.join(name, rel_path) }
if utils.guess_is_text_file(path) and os.path.getsize(path) > 0:
file_info['contents'] = open(path, "rb").read()
file_infos.append(file_info)
tests.append({
'name': name,
'id': name.replace('.', '-'),
'status': status,
'files': file_infos
})
return sorted(tests, key = lambda t: t['name'])
def generate_html(output_file, reportData):
file_out = open(output_file, 'w')
mustachePath = os.path.realpath(os.path.join(os.path.dirname(__file__), 'mustache', 'mustache.js'))
mustacheContent = open(mustachePath).read()
pageHTML = test_report_template % {"pagedata": json.dumps(reportData, separators=(',', ':')), 'mustacheContents': mustacheContent}
file_out.write(pageHTML)
def gen_report(test_root, tests):
buildbot = False
if "BUILD_NUMBER" in os.environ:
buildbot = {
'build_id': os.environ['JOB_NAME'] + ' ' + os.environ["BUILD_NUMBER"],
'build_link': os.environ['BUILD_URL']
}
git_info = {
'branch': subprocess.check_output(['git symbolic-ref HEAD 2>/dev/null || echo "HEAD"'], shell=True),
'commit': subprocess.check_output(['git', 'rev-parse', 'HEAD']),
'message': subprocess.check_output(['git', 'show', '-s', '--format=%B'])
}
tests_param = format_tests(test_root, tests)
passed = sum(1 for test in tests_param if test['status'] == 'pass')
total = len(tests_param)
# TODO: use `rethinkdb --version' instead
rethinkdb_version = subprocess.check_output([os.path.dirname(__file__) + "/../../scripts/gen-version.sh"])
reportData = {
"buildbot": buildbot,
"tests": tests_param,
"rethinkdb_version": rethinkdb_version,
"git_info": git_info,
"passed_test_count": passed,
"total_test_count": total
}
generate_html(test_root + "/test_results.html", reportData)
print('Wrote test report to "%s/test_results.html"' % os.path.realpath(test_root))
test_report_template = """
<html>
<head>
<title>Test Report</title>
<style>
td {border:1px solid grey}
.test { background: red }
.test.pass { background: green }
</style>
<script>
%(mustacheContents)s
</script>
<script>
function toggleVisibility(targetId) {
var target = document.getElementById(targetId);
if (target != null) {
if (target.style.display == "none") {
target.style.display = null;
} else {
target.style.display = "none";
}
}
}
pageData = %(pagedata)s;
function displayData() {
var template = document.getElementById("handlebars-template").textContent;
document.body.innerHTML = Mustache.to_html(template, pageData);
}
</script>
</head>
<body onload="displayData()">
This should be replaced by the content in a moment.
<script id="handlebars-template" type="text/x-handlebars-template">
<h1>Rethinkdb {{ rethinkdb_version }}</h1>
{{ #buildbot }}
<p>Build: <a href="{{ buildbot.build_link }}">{{ buildbot.build_id }}</a>
{{ /buildbot }}
<p>Branch: <a href="https://github.com/rethinkdb/rethinkdb/tree/{{ git_info.branch }}">{{ git_info.branch }}</a>
<p>Commit: <a href="https://github.com/rethinkdb/rethinkdb/commit/{{ git_info.commit }}">{{ git_info.commit }}</a>
<p>Commit message:
<pre>{{ git_info.message }}</pre>
<p>Passed {{ passed_test_count }} of {{ total_test_count }} tests</p>
<table style='width:100%%'>
{{#tests}}
<tr>
<td>{{name}}</td>
<td class="test {{ status }}">
<a href='#{{ id }}' onclick='toggleVisibility("{{ id }}")'>{{status}}</a>
</td>
<td width='100%%'></td></tr>
<tr id='{{ id }}' style='display:none'>
<td colspan='4'>
{{ #files }}
<ul><li><a href="{{ name }}">{{ name }}</a></ul>
<div style='border: 1px solid black'>
<pre>{{ contents }}</pre>
</div>
{{ /files }}
</td></tr>
{{ /tests }}
</table>
</script>
</body>
</html>
"""
| agpl-3.0 |
yobin/saepy-log | pygments/lexers/text.py | 41 | 67382 | # -*- coding: utf-8 -*-
"""
pygments.lexers.text
~~~~~~~~~~~~~~~~~~~~
Lexers for non-source code file types.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from bisect import bisect
from pygments.lexer import Lexer, LexerContext, RegexLexer, ExtendedRegexLexer, \
bygroups, include, using, this, do_insertions
from pygments.token import Punctuation, Text, Comment, Keyword, Name, String, \
Generic, Operator, Number, Whitespace, Literal
from pygments.util import get_bool_opt, ClassNotFound
from pygments.lexers.other import BashLexer
__all__ = ['IniLexer', 'PropertiesLexer', 'SourcesListLexer', 'BaseMakefileLexer',
'MakefileLexer', 'DiffLexer', 'IrcLogsLexer', 'TexLexer',
'GroffLexer', 'ApacheConfLexer', 'BBCodeLexer', 'MoinWikiLexer',
'RstLexer', 'VimLexer', 'GettextLexer', 'SquidConfLexer',
'DebianControlLexer', 'DarcsPatchLexer', 'YamlLexer',
'LighttpdConfLexer', 'NginxConfLexer', 'CMakeLexer', 'HttpLexer',
'PyPyLogLexer', 'RegeditLexer', 'HxmlLexer']
class IniLexer(RegexLexer):
"""
Lexer for configuration files in INI style.
"""
name = 'INI'
aliases = ['ini', 'cfg']
filenames = ['*.ini', '*.cfg']
mimetypes = ['text/x-ini']
tokens = {
'root': [
(r'\s+', Text),
(r'[;#].*', Comment.Single),
(r'\[.*?\]$', Keyword),
(r'(.*?)([ \t]*)(=)([ \t]*)(.*(?:\n[ \t].+)*)',
bygroups(Name.Attribute, Text, Operator, Text, String))
]
}
def analyse_text(text):
npos = text.find('\n')
if npos < 3:
return False
return text[0] == '[' and text[npos-1] == ']'
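# A hedged usage sketch (standard pygments API, not defined in this module):
#     from pygments import highlight
#     from pygments.formatters import TerminalFormatter
#     print(highlight('[core]\nkey = value\n', IniLexer(), TerminalFormatter()))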
class RegeditLexer(RegexLexer):
"""
Lexer for `Windows Registry
<http://en.wikipedia.org/wiki/Windows_Registry#.REG_files>`_ files produced
by regedit.
*New in Pygments 1.6.*
"""
name = 'reg'
aliases = ['registry']
filenames = ['*.reg']
mimetypes = ['text/x-windows-registry']
tokens = {
'root': [
(r'Windows Registry Editor.*', Text),
(r'\s+', Text),
(r'[;#].*', Comment.Single),
(r'(\[)(-?)(HKEY_[A-Z_]+)(.*?\])$',
bygroups(Keyword, Operator, Name.Builtin, Keyword)),
# String keys, which obey somewhat normal escaping
(r'("(?:\\"|\\\\|[^"])+")([ \t]*)(=)([ \t]*)',
bygroups(Name.Attribute, Text, Operator, Text),
'value'),
# Bare keys (includes @)
(r'(.*?)([ \t]*)(=)([ \t]*)',
bygroups(Name.Attribute, Text, Operator, Text),
'value'),
],
'value': [
(r'-', Operator, '#pop'), # delete value
(r'(dword|hex(?:\([0-9a-fA-F]\))?)(:)([0-9a-fA-F,]+)',
bygroups(Name.Variable, Punctuation, Number), '#pop'),
# As far as I know, .reg files do not support line continuation.
(r'.*', String, '#pop'),
]
}
def analyse_text(text):
return text.startswith('Windows Registry Editor')
class PropertiesLexer(RegexLexer):
"""
Lexer for configuration files in Java's properties format.
*New in Pygments 1.4.*
"""
name = 'Properties'
aliases = ['properties']
filenames = ['*.properties']
mimetypes = ['text/x-java-properties']
tokens = {
'root': [
(r'\s+', Text),
(r'(?:[;#]|//).*$', Comment),
(r'(.*?)([ \t]*)([=:])([ \t]*)(.*(?:(?<=\\)\n.*)*)',
bygroups(Name.Attribute, Text, Operator, Text, String)),
],
}
class SourcesListLexer(RegexLexer):
"""
Lexer that highlights Debian sources.list files.
*New in Pygments 0.7.*
"""
name = 'Debian Sourcelist'
aliases = ['sourceslist', 'sources.list']
filenames = ['sources.list']
mimetype = ['application/x-debian-sourceslist']
tokens = {
'root': [
(r'\s+', Text),
(r'#.*?$', Comment),
(r'^(deb(?:-src)?)(\s+)',
bygroups(Keyword, Text), 'distribution')
],
'distribution': [
(r'#.*?$', Comment, '#pop'),
(r'\$\(ARCH\)', Name.Variable),
(r'[^\s$[]+', String),
(r'\[', String.Other, 'escaped-distribution'),
(r'\$', String),
(r'\s+', Text, 'components')
],
'escaped-distribution': [
(r'\]', String.Other, '#pop'),
(r'\$\(ARCH\)', Name.Variable),
(r'[^\]$]+', String.Other),
(r'\$', String.Other)
],
'components': [
(r'#.*?$', Comment, '#pop:2'),
(r'$', Text, '#pop:2'),
(r'\s+', Text),
(r'\S+', Keyword.Pseudo),
]
}
def analyse_text(text):
for line in text.split('\n'):
line = line.strip()
if not (line.startswith('#') or line.startswith('deb ') or
line.startswith('deb-src ') or not line):
return False
return True
class MakefileLexer(Lexer):
"""
Lexer for BSD and GNU make extensions (lenient enough to handle both in
the same file even).
*Rewritten in Pygments 0.10.*
"""
name = 'Makefile'
aliases = ['make', 'makefile', 'mf', 'bsdmake']
filenames = ['*.mak', 'Makefile', 'makefile', 'Makefile.*', 'GNUmakefile']
mimetypes = ['text/x-makefile']
r_special = re.compile(r'^(?:'
# BSD Make
r'\.\s*(include|undef|error|warning|if|else|elif|endif|for|endfor)|'
# GNU Make
r'\s*(ifeq|ifneq|ifdef|ifndef|else|endif|-?include|define|endef|:))(?=\s)')
r_comment = re.compile(r'^\s*@?#')
def get_tokens_unprocessed(self, text):
ins = []
lines = text.splitlines(True)
done = ''
lex = BaseMakefileLexer(**self.options)
backslashflag = False
for line in lines:
if self.r_special.match(line) or backslashflag:
ins.append((len(done), [(0, Comment.Preproc, line)]))
backslashflag = line.strip().endswith('\\')
elif self.r_comment.match(line):
ins.append((len(done), [(0, Comment, line)]))
else:
done += line
for item in do_insertions(ins, lex.get_tokens_unprocessed(done)):
yield item
class BaseMakefileLexer(RegexLexer):
"""
Lexer for simple Makefiles (no preprocessing).
*New in Pygments 0.10.*
"""
name = 'Base Makefile'
aliases = ['basemake']
filenames = []
mimetypes = []
tokens = {
'root': [
(r'^(?:[\t ]+.*\n|\n)+', using(BashLexer)),
(r'\$\((?:.*\\\n|.*\n)+', using(BashLexer)),
(r'\s+', Text),
(r'#.*?\n', Comment),
(r'(export)(\s+)(?=[a-zA-Z0-9_${}\t -]+\n)',
bygroups(Keyword, Text), 'export'),
(r'export\s+', Keyword),
# assignment
(r'([a-zA-Z0-9_${}.-]+)(\s*)([!?:+]?=)([ \t]*)((?:.*\\\n)+|.*\n)',
bygroups(Name.Variable, Text, Operator, Text, using(BashLexer))),
# strings
(r'(?s)"(\\\\|\\.|[^"\\])*"', String.Double),
(r"(?s)'(\\\\|\\.|[^'\\])*'", String.Single),
# targets
(r'([^\n:]+)(:+)([ \t]*)', bygroups(Name.Function, Operator, Text),
'block-header'),
# TODO: add paren handling (grr)
],
'export': [
(r'[a-zA-Z0-9_${}-]+', Name.Variable),
(r'\n', Text, '#pop'),
(r'\s+', Text),
],
'block-header': [
(r'[^,\\\n#]+', Number),
(r',', Punctuation),
(r'#.*?\n', Comment),
(r'\\\n', Text), # line continuation
(r'\\.', Text),
(r'(?:[\t ]+.*\n|\n)+', using(BashLexer), '#pop'),
],
}
class DiffLexer(RegexLexer):
"""
Lexer for unified or context-style diffs or patches.
"""
name = 'Diff'
aliases = ['diff', 'udiff']
filenames = ['*.diff', '*.patch']
mimetypes = ['text/x-diff', 'text/x-patch']
tokens = {
'root': [
(r' .*\n', Text),
(r'\+.*\n', Generic.Inserted),
(r'-.*\n', Generic.Deleted),
(r'!.*\n', Generic.Strong),
(r'@.*\n', Generic.Subheading),
(r'([Ii]ndex|diff).*\n', Generic.Heading),
(r'=.*\n', Generic.Heading),
(r'.*\n', Text),
]
}
def analyse_text(text):
if text[:7] == 'Index: ':
return True
if text[:5] == 'diff ':
return True
if text[:4] == '--- ':
return 0.9
DPATCH_KEYWORDS = ['hunk', 'addfile', 'adddir', 'rmfile', 'rmdir', 'move',
'replace']
class DarcsPatchLexer(RegexLexer):
"""
DarcsPatchLexer is a lexer for the various versions of the darcs patch
format. Examples of this format are derived by commands such as
``darcs annotate --patch`` and ``darcs send``.
*New in Pygments 0.10.*
"""
name = 'Darcs Patch'
aliases = ['dpatch']
filenames = ['*.dpatch', '*.darcspatch']
tokens = {
'root': [
(r'<', Operator),
(r'>', Operator),
(r'{', Operator),
(r'}', Operator),
(r'(\[)((?:TAG )?)(.*)(\n)(.*)(\*\*)(\d+)(\s?)(\])',
bygroups(Operator, Keyword, Name, Text, Name, Operator,
Literal.Date, Text, Operator)),
(r'(\[)((?:TAG )?)(.*)(\n)(.*)(\*\*)(\d+)(\s?)',
bygroups(Operator, Keyword, Name, Text, Name, Operator,
Literal.Date, Text), 'comment'),
(r'New patches:', Generic.Heading),
(r'Context:', Generic.Heading),
(r'Patch bundle hash:', Generic.Heading),
(r'(\s*)(%s)(.*\n)' % '|'.join(DPATCH_KEYWORDS),
bygroups(Text, Keyword, Text)),
(r'\+', Generic.Inserted, "insert"),
(r'-', Generic.Deleted, "delete"),
(r'.*\n', Text),
],
'comment': [
(r'[^\]].*\n', Comment),
(r'\]', Operator, "#pop"),
],
'specialText': [ # darcs add [_CODE_] special operators for clarity
(r'\n', Text, "#pop"), # line-based
(r'\[_[^_]*_]', Operator),
],
'insert': [
include('specialText'),
(r'\[', Generic.Inserted),
(r'[^\n\[]+', Generic.Inserted),
],
'delete': [
include('specialText'),
(r'\[', Generic.Deleted),
(r'[^\n\[]+', Generic.Deleted),
],
}
class IrcLogsLexer(RegexLexer):
"""
Lexer for IRC logs in *irssi*, *xchat* or *weechat* style.
"""
name = 'IRC logs'
aliases = ['irc']
filenames = ['*.weechatlog']
mimetypes = ['text/x-irclog']
flags = re.VERBOSE | re.MULTILINE
timestamp = r"""
(
# irssi / xchat and others
(?: \[|\()? # Opening bracket or paren for the timestamp
(?: # Timestamp
(?: (?:\d{1,4} [-/]?)+ # Date as - or /-separated groups of digits
[T ])? # Date/time separator: T or space
(?: \d?\d [:.]?)+ # Time as :/.-separated groups of 1 or 2 digits
)
(?: \]|\))?\s+ # Closing bracket or paren for the timestamp
|
# weechat
\d{4}\s\w{3}\s\d{2}\s # Date
\d{2}:\d{2}:\d{2}\s+ # Time + Whitespace
|
# xchat
\w{3}\s\d{2}\s # Date
\d{2}:\d{2}:\d{2}\s+ # Time + Whitespace
)?
"""
tokens = {
'root': [
# log start/end
(r'^\*\*\*\*(.*)\*\*\*\*$', Comment),
# hack
("^" + timestamp + r'(\s*<[^>]*>\s*)$', bygroups(Comment.Preproc, Name.Tag)),
# normal msgs
("^" + timestamp + r"""
(\s*<.*?>\s*) # Nick """,
bygroups(Comment.Preproc, Name.Tag), 'msg'),
# /me msgs
("^" + timestamp + r"""
(\s*[*]\s+) # Star
(\S+\s+.*?\n) # Nick + rest of message """,
bygroups(Comment.Preproc, Keyword, Generic.Inserted)),
# join/part msgs
("^" + timestamp + r"""
(\s*(?:\*{3}|<?-[!@=P]?->?)\s*) # Star(s) or symbols
(\S+\s+) # Nick + Space
(.*?\n) # Rest of message """,
bygroups(Comment.Preproc, Keyword, String, Comment)),
(r"^.*?\n", Text),
],
'msg': [
(r"\S+:(?!//)", Name.Attribute), # Prefix
(r".*\n", Text, '#pop'),
],
}
class BBCodeLexer(RegexLexer):
"""
A lexer that highlights BBCode(-like) syntax.
*New in Pygments 0.6.*
"""
name = 'BBCode'
aliases = ['bbcode']
mimetypes = ['text/x-bbcode']
tokens = {
'root': [
(r'[^[]+', Text),
# tag/end tag begin
(r'\[/?\w+', Keyword, 'tag'),
# stray bracket
(r'\[', Text),
],
'tag': [
(r'\s+', Text),
# attribute with value
(r'(\w+)(=)("?[^\s"\]]+"?)',
bygroups(Name.Attribute, Operator, String)),
# tag argument (a la [color=green])
(r'(=)("?[^\s"\]]+"?)',
bygroups(Operator, String)),
# tag end
(r'\]', Keyword, '#pop'),
],
}
class TexLexer(RegexLexer):
"""
Lexer for the TeX and LaTeX typesetting languages.
"""
name = 'TeX'
aliases = ['tex', 'latex']
filenames = ['*.tex', '*.aux', '*.toc']
mimetypes = ['text/x-tex', 'text/x-latex']
tokens = {
'general': [
(r'%.*?\n', Comment),
(r'[{}]', Name.Builtin),
(r'[&_^]', Name.Builtin),
],
'root': [
(r'\\\[', String.Backtick, 'displaymath'),
(r'\\\(', String, 'inlinemath'),
(r'\$\$', String.Backtick, 'displaymath'),
(r'\$', String, 'inlinemath'),
(r'\\([a-zA-Z]+|.)', Keyword, 'command'),
include('general'),
(r'[^\\$%&_^{}]+', Text),
],
'math': [
(r'\\([a-zA-Z]+|.)', Name.Variable),
include('general'),
(r'[0-9]+', Number),
(r'[-=!+*/()\[\]]', Operator),
(r'[^=!+*/()\[\]\\$%&_^{}0-9-]+', Name.Builtin),
],
'inlinemath': [
(r'\\\)', String, '#pop'),
(r'\$', String, '#pop'),
include('math'),
],
'displaymath': [
(r'\\\]', String, '#pop'),
(r'\$\$', String, '#pop'),
(r'\$', Name.Builtin),
include('math'),
],
'command': [
(r'\[.*?\]', Name.Attribute),
(r'\*', Keyword),
(r'', Text, '#pop'),
],
}
def analyse_text(text):
for start in ("\\documentclass", "\\input", "\\documentstyle",
"\\relax"):
if text[:len(start)] == start:
return True
class GroffLexer(RegexLexer):
"""
Lexer for the (g)roff typesetting language, supporting groff
extensions. Mainly useful for highlighting manpage sources.
*New in Pygments 0.6.*
"""
name = 'Groff'
aliases = ['groff', 'nroff', 'man']
filenames = ['*.[1234567]', '*.man']
mimetypes = ['application/x-troff', 'text/troff']
tokens = {
'root': [
(r'(\.)(\w+)', bygroups(Text, Keyword), 'request'),
(r'\.', Punctuation, 'request'),
# Regular characters, slurp till we find a backslash or newline
(r'[^\\\n]*', Text, 'textline'),
],
'textline': [
include('escapes'),
(r'[^\\\n]+', Text),
(r'\n', Text, '#pop'),
],
'escapes': [
# groff has many ways to write escapes.
(r'\\"[^\n]*', Comment),
(r'\\[fn]\w', String.Escape),
(r'\\\(.{2}', String.Escape),
(r'\\.\[.*\]', String.Escape),
(r'\\.', String.Escape),
(r'\\\n', Text, 'request'),
],
'request': [
(r'\n', Text, '#pop'),
include('escapes'),
(r'"[^\n"]+"', String.Double),
(r'\d+', Number),
(r'\S+', String),
(r'\s+', Text),
],
}
def analyse_text(text):
if text[:1] != '.':
return False
if text[:3] == '.\\"':
return True
if text[:4] == '.TH ':
return True
if text[1:3].isalnum() and text[3].isspace():
return 0.9
class ApacheConfLexer(RegexLexer):
"""
Lexer for configuration files following the Apache config file
format.
*New in Pygments 0.6.*
"""
name = 'ApacheConf'
aliases = ['apacheconf', 'aconf', 'apache']
filenames = ['.htaccess', 'apache.conf', 'apache2.conf']
mimetypes = ['text/x-apacheconf']
flags = re.MULTILINE | re.IGNORECASE
tokens = {
'root': [
(r'\s+', Text),
(r'(#.*?)$', Comment),
(r'(<[^\s>]+)(?:(\s+)(.*?))?(>)',
bygroups(Name.Tag, Text, String, Name.Tag)),
(r'([a-zA-Z][a-zA-Z0-9_]*)(\s+)',
bygroups(Name.Builtin, Text), 'value'),
(r'\.+', Text),
],
'value': [
(r'$', Text, '#pop'),
(r'[^\S\n]+', Text),
(r'\d+\.\d+\.\d+\.\d+(?:/\d+)?', Number),
(r'\d+', Number),
(r'/([a-zA-Z0-9][a-zA-Z0-9_./-]+)', String.Other),
(r'(on|off|none|any|all|double|email|dns|min|minimal|'
r'os|productonly|full|emerg|alert|crit|error|warn|'
r'notice|info|debug|registry|script|inetd|standalone|'
r'user|group)\b', Keyword),
(r'"([^"\\]*(?:\\.[^"\\]*)*)"', String.Double),
(r'[^\s"]+', Text)
]
}
class MoinWikiLexer(RegexLexer):
"""
For MoinMoin (and Trac) Wiki markup.
*New in Pygments 0.7.*
"""
name = 'MoinMoin/Trac Wiki markup'
aliases = ['trac-wiki', 'moin']
filenames = []
mimetypes = ['text/x-trac-wiki']
flags = re.MULTILINE | re.IGNORECASE
tokens = {
'root': [
(r'^#.*$', Comment),
(r'(!)(\S+)', bygroups(Keyword, Text)), # Ignore-next
# Titles
(r'^(=+)([^=]+)(=+)(\s*#.+)?$',
bygroups(Generic.Heading, using(this), Generic.Heading, String)),
# Literal code blocks, with optional shebang
(r'({{{)(\n#!.+)?', bygroups(Name.Builtin, Name.Namespace), 'codeblock'),
(r'(\'\'\'?|\|\||`|__|~~|\^|,,|::)', Comment), # Formatting
# Lists
(r'^( +)([.*-])( )', bygroups(Text, Name.Builtin, Text)),
(r'^( +)([a-z]{1,5}\.)( )', bygroups(Text, Name.Builtin, Text)),
# Other Formatting
(r'\[\[\w+.*?\]\]', Keyword), # Macro
(r'(\[[^\s\]]+)(\s+[^\]]+?)?(\])',
bygroups(Keyword, String, Keyword)), # Link
(r'^----+$', Keyword), # Horizontal rules
(r'[^\n\'\[{!_~^,|]+', Text),
(r'\n', Text),
(r'.', Text),
],
'codeblock': [
(r'}}}', Name.Builtin, '#pop'),
# these blocks are allowed to be nested in Trac, but not MoinMoin
(r'{{{', Text, '#push'),
(r'[^{}]+', Comment.Preproc), # slurp boring text
(r'.', Comment.Preproc), # allow loose { or }
],
}
class RstLexer(RegexLexer):
"""
For `reStructuredText <http://docutils.sf.net/rst.html>`_ markup.
*New in Pygments 0.7.*
Additional options accepted:
`handlecodeblocks`
Highlight the contents of ``.. sourcecode:: language`` and
``.. code:: language`` directives with a lexer for the given
language (default: ``True``). *New in Pygments 0.8.*
"""
name = 'reStructuredText'
aliases = ['rst', 'rest', 'restructuredtext']
filenames = ['*.rst', '*.rest']
mimetypes = ["text/x-rst", "text/prs.fallenstein.rst"]
flags = re.MULTILINE
def _handle_sourcecode(self, match):
from pygments.lexers import get_lexer_by_name
# section header
yield match.start(1), Punctuation, match.group(1)
yield match.start(2), Text, match.group(2)
yield match.start(3), Operator.Word, match.group(3)
yield match.start(4), Punctuation, match.group(4)
yield match.start(5), Text, match.group(5)
yield match.start(6), Keyword, match.group(6)
yield match.start(7), Text, match.group(7)
# lookup lexer if wanted and existing
lexer = None
if self.handlecodeblocks:
try:
lexer = get_lexer_by_name(match.group(6).strip())
except ClassNotFound:
pass
indention = match.group(8)
indention_size = len(indention)
code = (indention + match.group(9) + match.group(10) + match.group(11))
# no lexer for this language. handle it like it was a code block
if lexer is None:
yield match.start(8), String, code
return
# highlight the lines with the lexer.
ins = []
codelines = code.splitlines(True)
code = ''
for line in codelines:
if len(line) > indention_size:
ins.append((len(code), [(0, Text, line[:indention_size])]))
code += line[indention_size:]
else:
code += line
for item in do_insertions(ins, lexer.get_tokens_unprocessed(code)):
yield item
# from docutils.parsers.rst.states
closers = u'\'")]}>\u2019\u201d\xbb!?'
unicode_delimiters = u'\u2010\u2011\u2012\u2013\u2014\u00a0'
end_string_suffix = (r'((?=$)|(?=[-/:.,; \n\x00%s%s]))'
% (re.escape(unicode_delimiters),
re.escape(closers)))
tokens = {
'root': [
# Heading with overline
(r'^(=+|-+|`+|:+|\.+|\'+|"+|~+|\^+|_+|\*+|\++|#+)([ \t]*\n)'
r'(.+)(\n)(\1)(\n)',
bygroups(Generic.Heading, Text, Generic.Heading,
Text, Generic.Heading, Text)),
# Plain heading
(r'^(\S.*)(\n)(={3,}|-{3,}|`{3,}|:{3,}|\.{3,}|\'{3,}|"{3,}|'
r'~{3,}|\^{3,}|_{3,}|\*{3,}|\+{3,}|#{3,})(\n)',
bygroups(Generic.Heading, Text, Generic.Heading, Text)),
# Bulleted lists
(r'^(\s*)([-*+])( .+\n(?:\1 .+\n)*)',
bygroups(Text, Number, using(this, state='inline'))),
# Numbered lists
(r'^(\s*)([0-9#ivxlcmIVXLCM]+\.)( .+\n(?:\1 .+\n)*)',
bygroups(Text, Number, using(this, state='inline'))),
(r'^(\s*)(\(?[0-9#ivxlcmIVXLCM]+\))( .+\n(?:\1 .+\n)*)',
bygroups(Text, Number, using(this, state='inline'))),
# Numbered, but keep words at BOL from becoming lists
(r'^(\s*)([A-Z]+\.)( .+\n(?:\1 .+\n)+)',
bygroups(Text, Number, using(this, state='inline'))),
(r'^(\s*)(\(?[A-Za-z]+\))( .+\n(?:\1 .+\n)+)',
bygroups(Text, Number, using(this, state='inline'))),
# Line blocks
(r'^(\s*)(\|)( .+\n(?:\| .+\n)*)',
bygroups(Text, Operator, using(this, state='inline'))),
# Sourcecode directives
(r'^( *\.\.)(\s*)((?:source)?code)(::)([ \t]*)([^\n]+)'
r'(\n[ \t]*\n)([ \t]+)(.*)(\n)((?:(?:\8.*|)\n)+)',
_handle_sourcecode),
# A directive
(r'^( *\.\.)(\s*)([\w:-]+?)(::)(?:([ \t]*)(.*))',
bygroups(Punctuation, Text, Operator.Word, Punctuation, Text,
using(this, state='inline'))),
# A reference target
(r'^( *\.\.)(\s*)(_(?:[^:\\]|\\.)+:)(.*?)$',
bygroups(Punctuation, Text, Name.Tag, using(this, state='inline'))),
# A footnote/citation target
(r'^( *\.\.)(\s*)(\[.+\])(.*?)$',
bygroups(Punctuation, Text, Name.Tag, using(this, state='inline'))),
# A substitution def
(r'^( *\.\.)(\s*)(\|.+\|)(\s*)([\w:-]+?)(::)(?:([ \t]*)(.*))',
bygroups(Punctuation, Text, Name.Tag, Text, Operator.Word,
Punctuation, Text, using(this, state='inline'))),
# Comments
(r'^ *\.\..*(\n( +.*\n|\n)+)?', Comment.Preproc),
# Field list
(r'^( *)(:[a-zA-Z-]+:)(\s*)$', bygroups(Text, Name.Class, Text)),
(r'^( *)(:.*?:)([ \t]+)(.*?)$',
bygroups(Text, Name.Class, Text, Name.Function)),
# Definition list
(r'^([^ ].*(?<!::)\n)((?:(?: +.*)\n)+)',
bygroups(using(this, state='inline'), using(this, state='inline'))),
# Code blocks
(r'(::)(\n[ \t]*\n)([ \t]+)(.*)(\n)((?:(?:\3.*|)\n)+)',
bygroups(String.Escape, Text, String, String, Text, String)),
include('inline'),
],
'inline': [
(r'\\.', Text), # escape
(r'``', String, 'literal'), # code
(r'(`.+?)(<.+?>)(`__?)', # reference with inline target
bygroups(String, String.Interpol, String)),
(r'`.+?`__?', String), # reference
(r'(`.+?`)(:[a-zA-Z0-9:-]+?:)?',
bygroups(Name.Variable, Name.Attribute)), # role
(r'(:[a-zA-Z0-9:-]+?:)(`.+?`)',
bygroups(Name.Attribute, Name.Variable)), # role (content first)
(r'\*\*.+?\*\*', Generic.Strong), # Strong emphasis
(r'\*.+?\*', Generic.Emph), # Emphasis
(r'\[.*?\]_', String), # Footnote or citation
(r'<.+?>', Name.Tag), # Hyperlink
(r'[^\\\n\[*`:]+', Text),
(r'.', Text),
],
'literal': [
(r'[^`]+', String),
(r'``' + end_string_suffix, String, '#pop'),
(r'`', String),
]
}
def __init__(self, **options):
self.handlecodeblocks = get_bool_opt(options, 'handlecodeblocks', True)
RegexLexer.__init__(self, **options)
def analyse_text(text):
if text[:2] == '..' and text[2:3] != '.':
return 0.3
p1 = text.find("\n")
p2 = text.find("\n", p1 + 1)
if (p2 > -1 and # has two lines
p1 * 2 + 1 == p2 and # they are the same length
text[p1+1] in '-=' and # the next line both starts and ends with
text[p1+1] == text[p2-1]): # ...a sufficiently high header
return 0.5
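# Per the `handlecodeblocks` option documented above, nested highlighting of
# ``.. code::``/``.. sourcecode::`` blocks can be switched off:
#     lexer = RstLexer(handlecodeblocks=False)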
class VimLexer(RegexLexer):
"""
Lexer for VimL script files.
*New in Pygments 0.8.*
"""
name = 'VimL'
aliases = ['vim']
filenames = ['*.vim', '.vimrc', '.exrc', '.gvimrc',
'_vimrc', '_exrc', '_gvimrc', 'vimrc', 'gvimrc']
mimetypes = ['text/x-vim']
flags = re.MULTILINE
tokens = {
'root': [
(r'^\s*".*', Comment),
(r'[ \t]+', Text),
# TODO: regexes can have other delims
(r'/(\\\\|\\/|[^\n/])*/', String.Regex),
(r'"(\\\\|\\"|[^\n"])*"', String.Double),
(r"'(\\\\|\\'|[^\n'])*'", String.Single),
# Who decided that doublequote was a good comment character??
(r'(?<=\s)"[^\-:.%#=*].*', Comment),
(r'-?\d+', Number),
(r'#[0-9a-f]{6}', Number.Hex),
(r'^:', Punctuation),
(r'[()<>+=!|,~-]', Punctuation), # Inexact list. Looks decent.
(r'\b(let|if|else|endif|elseif|fun|function|endfunction)\b',
Keyword),
(r'\b(NONE|bold|italic|underline|dark|light)\b', Name.Builtin),
(r'\b\w+\b', Name.Other), # These are postprocessed below
(r'.', Text),
],
}
def __init__(self, **options):
from pygments.lexers._vimbuiltins import command, option, auto
self._cmd = command
self._opt = option
self._aut = auto
RegexLexer.__init__(self, **options)
def is_in(self, w, mapping):
r"""
It's kind of difficult to decide if something might be a keyword
in VimL because it allows you to abbreviate them. In fact,
'ab[breviate]' is a good example. :ab, :abbre, or :abbreviate are
valid ways to call it so rather than making really awful regexps
like::
\bab(?:b(?:r(?:e(?:v(?:i(?:a(?:t(?:e)?)?)?)?)?)?)?)?\b
we match `\b\w+\b` and then call is_in() on those tokens. See
`scripts/get_vimkw.py` for how the lists are extracted.
"""
p = bisect(mapping, (w,))
if p > 0:
if mapping[p-1][0] == w[:len(mapping[p-1][0])] and \
mapping[p-1][1][:len(w)] == w: return True
if p < len(mapping):
return mapping[p][0] == w[:len(mapping[p][0])] and \
mapping[p][1][:len(w)] == w
return False
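# Illustrative calls (self omitted; mapping entries are sorted
# (minimal_abbreviation, full_keyword) pairs):
#     is_in('abbre',       [('ab', 'abbreviate')])  -> True
#     is_in('a',           [('ab', 'abbreviate')])  -> False (shorter than 'ab')
#     is_in('abbreviates', [('ab', 'abbreviate')])  -> False (overruns the keyword)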
def get_tokens_unprocessed(self, text):
# TODO: builtins are only subsequent tokens on lines
# and 'keywords' only happen at the beginning except
# for :au ones
for index, token, value in \
RegexLexer.get_tokens_unprocessed(self, text):
if token is Name.Other:
if self.is_in(value, self._cmd):
yield index, Keyword, value
elif self.is_in(value, self._opt) or \
self.is_in(value, self._aut):
yield index, Name.Builtin, value
else:
yield index, Text, value
else:
yield index, token, value
class GettextLexer(RegexLexer):
"""
Lexer for Gettext catalog files.
*New in Pygments 0.9.*
"""
name = 'Gettext Catalog'
aliases = ['pot', 'po']
filenames = ['*.pot', '*.po']
mimetypes = ['application/x-gettext', 'text/x-gettext', 'text/gettext']
tokens = {
'root': [
(r'^#,\s.*?$', Keyword.Type),
(r'^#:\s.*?$', Keyword.Declaration),
#(r'^#$', Comment),
(r'^(#|#\.\s|#\|\s|#~\s|#\s).*$', Comment.Single),
(r'^(")([A-Za-z-]+:)(.*")$',
bygroups(String, Name.Property, String)),
(r'^".*"$', String),
(r'^(msgid|msgid_plural|msgstr)(\s+)(".*")$',
bygroups(Name.Variable, Text, String)),
(r'^(msgstr\[)(\d)(\])(\s+)(".*")$',
bygroups(Name.Variable, Number.Integer, Name.Variable, Text, String)),
]
}
class SquidConfLexer(RegexLexer):
"""
Lexer for `squid <http://www.squid-cache.org/>`_ configuration files.
*New in Pygments 0.9.*
"""
name = 'SquidConf'
aliases = ['squidconf', 'squid.conf', 'squid']
filenames = ['squid.conf']
mimetypes = ['text/x-squidconf']
flags = re.IGNORECASE
keywords = [
"access_log", "acl", "always_direct", "announce_host",
"announce_period", "announce_port", "announce_to", "anonymize_headers",
"append_domain", "as_whois_server", "auth_param_basic",
"authenticate_children", "authenticate_program", "authenticate_ttl",
"broken_posts", "buffered_logs", "cache_access_log", "cache_announce",
"cache_dir", "cache_dns_program", "cache_effective_group",
"cache_effective_user", "cache_host", "cache_host_acl",
"cache_host_domain", "cache_log", "cache_mem", "cache_mem_high",
"cache_mem_low", "cache_mgr", "cachemgr_passwd", "cache_peer",
"cache_peer_access", "cahce_replacement_policy", "cache_stoplist",
"cache_stoplist_pattern", "cache_store_log", "cache_swap",
"cache_swap_high", "cache_swap_log", "cache_swap_low", "client_db",
"client_lifetime", "client_netmask", "connect_timeout", "coredump_dir",
"dead_peer_timeout", "debug_options", "delay_access", "delay_class",
"delay_initial_bucket_level", "delay_parameters", "delay_pools",
"deny_info", "dns_children", "dns_defnames", "dns_nameservers",
"dns_testnames", "emulate_httpd_log", "err_html_text",
"fake_user_agent", "firewall_ip", "forwarded_for", "forward_snmpd_port",
"fqdncache_size", "ftpget_options", "ftpget_program", "ftp_list_width",
"ftp_passive", "ftp_user", "half_closed_clients", "header_access",
"header_replace", "hierarchy_stoplist", "high_response_time_warning",
"high_page_fault_warning", "hosts_file", "htcp_port", "http_access",
"http_anonymizer", "httpd_accel", "httpd_accel_host",
"httpd_accel_port", "httpd_accel_uses_host_header",
"httpd_accel_with_proxy", "http_port", "http_reply_access",
"icp_access", "icp_hit_stale", "icp_port", "icp_query_timeout",
"ident_lookup", "ident_lookup_access", "ident_timeout",
"incoming_http_average", "incoming_icp_average", "inside_firewall",
"ipcache_high", "ipcache_low", "ipcache_size", "local_domain",
"local_ip", "logfile_rotate", "log_fqdn", "log_icp_queries",
"log_mime_hdrs", "maximum_object_size", "maximum_single_addr_tries",
"mcast_groups", "mcast_icp_query_timeout", "mcast_miss_addr",
"mcast_miss_encode_key", "mcast_miss_port", "memory_pools",
"memory_pools_limit", "memory_replacement_policy", "mime_table",
"min_http_poll_cnt", "min_icp_poll_cnt", "minimum_direct_hops",
"minimum_object_size", "minimum_retry_timeout", "miss_access",
"negative_dns_ttl", "negative_ttl", "neighbor_timeout",
"neighbor_type_domain", "netdb_high", "netdb_low", "netdb_ping_period",
"netdb_ping_rate", "never_direct", "no_cache", "passthrough_proxy",
"pconn_timeout", "pid_filename", "pinger_program", "positive_dns_ttl",
"prefer_direct", "proxy_auth", "proxy_auth_realm", "query_icmp",
"quick_abort", "quick_abort", "quick_abort_max", "quick_abort_min",
"quick_abort_pct", "range_offset_limit", "read_timeout",
"redirect_children", "redirect_program",
"redirect_rewrites_host_header", "reference_age", "reference_age",
"refresh_pattern", "reload_into_ims", "request_body_max_size",
"request_size", "request_timeout", "shutdown_lifetime",
"single_parent_bypass", "siteselect_timeout", "snmp_access",
"snmp_incoming_address", "snmp_port", "source_ping", "ssl_proxy",
"store_avg_object_size", "store_objects_per_bucket",
"strip_query_terms", "swap_level1_dirs", "swap_level2_dirs",
"tcp_incoming_address", "tcp_outgoing_address", "tcp_recv_bufsize",
"test_reachability", "udp_hit_obj", "udp_hit_obj_size",
"udp_incoming_address", "udp_outgoing_address", "unique_hostname",
"unlinkd_program", "uri_whitespace", "useragent_log",
"visible_hostname", "wais_relay", "wais_relay_host", "wais_relay_port",
]
opts = [
"proxy-only", "weight", "ttl", "no-query", "default", "round-robin",
"multicast-responder", "on", "off", "all", "deny", "allow", "via",
"parent", "no-digest", "heap", "lru", "realm", "children", "q1", "q2",
"credentialsttl", "none", "disable", "offline_toggle", "diskd",
]
actions = [
"shutdown", "info", "parameter", "server_list", "client_list",
r'squid\.conf',
]
actions_stats = [
"objects", "vm_objects", "utilization", "ipcache", "fqdncache", "dns",
"redirector", "io", "reply_headers", "filedescriptors", "netdb",
]
actions_log = ["status", "enable", "disable", "clear"]
acls = [
"url_regex", "urlpath_regex", "referer_regex", "port", "proto",
"req_mime_type", "rep_mime_type", "method", "browser", "user", "src",
"dst", "time", "dstdomain", "ident", "snmp_community",
]
ip_re = (
r'(?:(?:(?:[3-9]\d?|2(?:5[0-5]|[0-4]?\d)?|1\d{0,2}|0x0*[0-9a-f]{1,2}|'
r'0+[1-3]?[0-7]{0,2})(?:\.(?:[3-9]\d?|2(?:5[0-5]|[0-4]?\d)?|1\d{0,2}|'
r'0x0*[0-9a-f]{1,2}|0+[1-3]?[0-7]{0,2})){3})|(?!.*::.*::)(?:(?!:)|'
r':(?=:))(?:[0-9a-f]{0,4}(?:(?<=::)|(?<!::):)){6}(?:[0-9a-f]{0,4}'
r'(?:(?<=::)|(?<!::):)[0-9a-f]{0,4}(?:(?<=::)|(?<!:)|(?<=:)(?<!::):)|'
r'(?:25[0-4]|2[0-4]\d|1\d\d|[1-9]?\d)(?:\.(?:25[0-4]|2[0-4]\d|1\d\d|'
r'[1-9]?\d)){3}))'
)
def makelistre(list):
return r'\b(?:' + '|'.join(list) + r')\b'
tokens = {
'root': [
(r'\s+', Whitespace),
(r'#', Comment, 'comment'),
(makelistre(keywords), Keyword),
(makelistre(opts), Name.Constant),
# Actions
(makelistre(actions), String),
(r'stats/'+makelistre(actions), String),
(r'log/'+makelistre(actions)+r'=', String),
(makelistre(acls), Keyword),
(ip_re + r'(?:/(?:' + ip_re + r'|\b\d+\b))?', Number.Float),
(r'(?:\b\d+\b(?:-\b\d+|%)?)', Number),
(r'\S+', Text),
],
'comment': [
(r'\s*TAG:.*', String.Escape, '#pop'),
(r'.*', Comment, '#pop'),
],
}
class DebianControlLexer(RegexLexer):
"""
Lexer for Debian ``control`` files and ``apt-cache show <pkg>`` outputs.
*New in Pygments 0.9.*
"""
name = 'Debian Control file'
aliases = ['control']
filenames = ['control']
tokens = {
'root': [
(r'^(Description)', Keyword, 'description'),
(r'^(Maintainer)(:\s*)', bygroups(Keyword, Text), 'maintainer'),
(r'^((Build-)?Depends)', Keyword, 'depends'),
(r'^((?:Python-)?Version)(:\s*)(\S+)$',
bygroups(Keyword, Text, Number)),
(r'^((?:Installed-)?Size)(:\s*)(\S+)$',
bygroups(Keyword, Text, Number)),
(r'^(MD5Sum|SHA1|SHA256)(:\s*)(\S+)$',
bygroups(Keyword, Text, Number)),
(r'^([a-zA-Z\-0-9\.]*?)(:\s*)(.*?)$',
bygroups(Keyword, Whitespace, String)),
],
'maintainer': [
(r'<[^>]+>', Generic.Strong),
(r'<[^>]+>$', Generic.Strong, '#pop'),
(r',\n?', Text),
(r'.', Text),
],
'description': [
(r'(.*)(Homepage)(: )(\S+)',
bygroups(Text, String, Name, Name.Class)),
(r':.*\n', Generic.Strong),
(r' .*\n', Text),
('', Text, '#pop'),
],
'depends': [
(r':\s*', Text),
(r'(\$)(\{)(\w+\s*:\s*\w+)', bygroups(Operator, Text, Name.Entity)),
(r'\(', Text, 'depend_vers'),
(r',', Text),
(r'\|', Operator),
(r'[\s]+', Text),
(r'[}\)]\s*$', Text, '#pop'),
(r'}', Text),
(r'[^,]$', Name.Function, '#pop'),
(r'([\+\.a-zA-Z0-9-])(\s*)', bygroups(Name.Function, Text)),
(r'\[.*?\]', Name.Entity),
],
'depend_vers': [
(r'\),', Text, '#pop'),
(r'\)[^,]', Text, '#pop:2'),
(r'([><=]+)(\s*)([^\)]+)', bygroups(Operator, Text, Number))
]
}
class YamlLexerContext(LexerContext):
"""Indentation context for the YAML lexer."""
def __init__(self, *args, **kwds):
super(YamlLexerContext, self).__init__(*args, **kwds)
self.indent_stack = []
self.indent = -1
self.next_indent = 0
self.block_scalar_indent = None
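# How these fields cooperate (summary of the callbacks defined below):
# save_indent() records a candidate level in next_indent, set_indent()
# commits it onto indent/indent_stack once a block node actually starts,
# and block_scalar_indent pins the fixed indentation of a literal or
# folded scalar until its state is popped.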
class YamlLexer(ExtendedRegexLexer):
"""
Lexer for `YAML <http://yaml.org/>`_, a human-friendly data serialization
language.
*New in Pygments 0.11.*
"""
name = 'YAML'
aliases = ['yaml']
filenames = ['*.yaml', '*.yml']
mimetypes = ['text/x-yaml']
def something(token_class):
"""Do not produce empty tokens."""
def callback(lexer, match, context):
text = match.group()
if not text:
return
yield match.start(), token_class, text
context.pos = match.end()
return callback
def reset_indent(token_class):
"""Reset the indentation levels."""
def callback(lexer, match, context):
text = match.group()
context.indent_stack = []
context.indent = -1
context.next_indent = 0
context.block_scalar_indent = None
yield match.start(), token_class, text
context.pos = match.end()
return callback
def save_indent(token_class, start=False):
"""Save a possible indentation level."""
def callback(lexer, match, context):
text = match.group()
extra = ''
if start:
context.next_indent = len(text)
if context.next_indent < context.indent:
while context.next_indent < context.indent:
context.indent = context.indent_stack.pop()
if context.next_indent > context.indent:
extra = text[context.indent:]
text = text[:context.indent]
else:
context.next_indent += len(text)
if text:
yield match.start(), token_class, text
if extra:
yield match.start()+len(text), token_class.Error, extra
context.pos = match.end()
return callback
def set_indent(token_class, implicit=False):
"""Set the previously saved indentation level."""
def callback(lexer, match, context):
text = match.group()
if context.indent < context.next_indent:
context.indent_stack.append(context.indent)
context.indent = context.next_indent
if not implicit:
context.next_indent += len(text)
yield match.start(), token_class, text
context.pos = match.end()
return callback
def set_block_scalar_indent(token_class):
"""Set an explicit indentation level for a block scalar."""
def callback(lexer, match, context):
text = match.group()
context.block_scalar_indent = None
if not text:
return
increment = match.group(1)
if increment:
current_indent = max(context.indent, 0)
increment = int(increment)
context.block_scalar_indent = current_indent + increment
if text:
yield match.start(), token_class, text
context.pos = match.end()
return callback
def parse_block_scalar_empty_line(indent_token_class, content_token_class):
"""Process an empty line in a block scalar."""
def callback(lexer, match, context):
text = match.group()
if (context.block_scalar_indent is None or
len(text) <= context.block_scalar_indent):
if text:
yield match.start(), indent_token_class, text
else:
indentation = text[:context.block_scalar_indent]
content = text[context.block_scalar_indent:]
yield match.start(), indent_token_class, indentation
yield (match.start()+context.block_scalar_indent,
content_token_class, content)
context.pos = match.end()
return callback
def parse_block_scalar_indent(token_class):
"""Process indentation spaces in a block scalar."""
def callback(lexer, match, context):
text = match.group()
if context.block_scalar_indent is None:
if len(text) <= max(context.indent, 0):
context.stack.pop()
context.stack.pop()
return
context.block_scalar_indent = len(text)
else:
if len(text) < context.block_scalar_indent:
context.stack.pop()
context.stack.pop()
return
if text:
yield match.start(), token_class, text
context.pos = match.end()
return callback
def parse_plain_scalar_indent(token_class):
"""Process indentation spaces in a plain scalar."""
def callback(lexer, match, context):
text = match.group()
if len(text) <= context.indent:
context.stack.pop()
context.stack.pop()
return
if text:
yield match.start(), token_class, text
context.pos = match.end()
return callback
tokens = {
# the root rules
'root': [
# ignored whitespaces
(r'[ ]+(?=#|$)', Text),
# line breaks
(r'\n+', Text),
# a comment
(r'#[^\n]*', Comment.Single),
# the '%YAML' directive
(r'^%YAML(?=[ ]|$)', reset_indent(Name.Tag), 'yaml-directive'),
# the %TAG directive
(r'^%TAG(?=[ ]|$)', reset_indent(Name.Tag), 'tag-directive'),
# document start and document end indicators
(r'^(?:---|\.\.\.)(?=[ ]|$)', reset_indent(Name.Namespace),
'block-line'),
# indentation spaces
(r'[ ]*(?![ \t\n\r\f\v]|$)', save_indent(Text, start=True),
('block-line', 'indentation')),
],
# trailing whitespaces after directives or a block scalar indicator
'ignored-line': [
# ignored whitespaces
(r'[ ]+(?=#|$)', Text),
# a comment
(r'#[^\n]*', Comment.Single),
# line break
(r'\n', Text, '#pop:2'),
],
# the %YAML directive
'yaml-directive': [
# the version number
(r'([ ]+)([0-9]+\.[0-9]+)',
bygroups(Text, Number), 'ignored-line'),
],
# the %TAG directive
'tag-directive': [
# a tag handle and the corresponding prefix
(r'([ ]+)(!|![0-9A-Za-z_-]*!)'
r'([ ]+)(!|!?[0-9A-Za-z;/?:@&=+$,_.!~*\'()\[\]%-]+)',
bygroups(Text, Keyword.Type, Text, Keyword.Type),
'ignored-line'),
],
# block scalar indicators and indentation spaces
'indentation': [
# trailing whitespaces are ignored
(r'[ ]*$', something(Text), '#pop:2'),
# whitespaces preceding block collection indicators
(r'[ ]+(?=[?:-](?:[ ]|$))', save_indent(Text)),
# block collection indicators
(r'[?:-](?=[ ]|$)', set_indent(Punctuation.Indicator)),
# the beginning of a block line
(r'[ ]*', save_indent(Text), '#pop'),
],
# an indented line in the block context
'block-line': [
# the line end
(r'[ ]*(?=#|$)', something(Text), '#pop'),
# whitespaces separating tokens
(r'[ ]+', Text),
# tags, anchors and aliases,
include('descriptors'),
# block collections and scalars
include('block-nodes'),
# flow collections and quoted scalars
include('flow-nodes'),
# a plain scalar
(r'(?=[^ \t\n\r\f\v?:,\[\]{}#&*!|>\'"%@`-]|[?:-][^ \t\n\r\f\v])',
something(Name.Variable),
'plain-scalar-in-block-context'),
],
# tags, anchors, aliases
'descriptors' : [
# a full-form tag
(r'!<[0-9A-Za-z;/?:@&=+$,_.!~*\'()\[\]%-]+>', Keyword.Type),
# a tag in the form '!', '!suffix' or '!handle!suffix'
(r'!(?:[0-9A-Za-z_-]+)?'
r'(?:![0-9A-Za-z;/?:@&=+$,_.!~*\'()\[\]%-]+)?', Keyword.Type),
# an anchor
(r'&[0-9A-Za-z_-]+', Name.Label),
# an alias
(r'\*[0-9A-Za-z_-]+', Name.Variable),
],
# block collections and scalars
'block-nodes': [
# implicit key
(r':(?=[ ]|$)', set_indent(Punctuation.Indicator, implicit=True)),
# literal and folded scalars
(r'[|>]', Punctuation.Indicator,
('block-scalar-content', 'block-scalar-header')),
],
# flow collections and quoted scalars
'flow-nodes': [
# a flow sequence
(r'\[', Punctuation.Indicator, 'flow-sequence'),
# a flow mapping
(r'\{', Punctuation.Indicator, 'flow-mapping'),
# a single-quoted scalar
(r'\'', String, 'single-quoted-scalar'),
# a double-quoted scalar
(r'\"', String, 'double-quoted-scalar'),
],
# the content of a flow collection
'flow-collection': [
# whitespaces
(r'[ ]+', Text),
# line breaks
(r'\n+', Text),
# a comment
(r'#[^\n]*', Comment.Single),
# simple indicators
(r'[?:,]', Punctuation.Indicator),
# tags, anchors and aliases
include('descriptors'),
# nested collections and quoted scalars
include('flow-nodes'),
# a plain scalar
(r'(?=[^ \t\n\r\f\v?:,\[\]{}#&*!|>\'"%@`])',
something(Name.Variable),
'plain-scalar-in-flow-context'),
],
# a flow sequence indicated by '[' and ']'
'flow-sequence': [
# include flow collection rules
include('flow-collection'),
# the closing indicator
(r'\]', Punctuation.Indicator, '#pop'),
],
# a flow mapping indicated by '{' and '}'
'flow-mapping': [
# include flow collection rules
include('flow-collection'),
# the closing indicator
(r'\}', Punctuation.Indicator, '#pop'),
],
# block scalar lines
'block-scalar-content': [
# line break
(r'\n', Text),
# empty line
(r'^[ ]+$',
parse_block_scalar_empty_line(Text, Name.Constant)),
# indentation spaces (we may leave the state here)
(r'^[ ]*', parse_block_scalar_indent(Text)),
# line content
(r'[^\n\r\f\v]+', Name.Constant),
],
# the header of a literal or folded scalar: indentation indicator plus chomping flag
'block-scalar-header': [
# indentation indicator followed by chomping flag
(r'([1-9])?[+-]?(?=[ ]|$)',
set_block_scalar_indent(Punctuation.Indicator),
'ignored-line'),
# chomping flag followed by indentation indicator
(r'[+-]?([1-9])?(?=[ ]|$)',
set_block_scalar_indent(Punctuation.Indicator),
'ignored-line'),
],
# ignored and regular whitespaces in quoted scalars
'quoted-scalar-whitespaces': [
# leading and trailing whitespaces are ignored
(r'^[ ]+', Text),
(r'[ ]+$', Text),
# line breaks are ignored
(r'\n+', Text),
# other whitespaces are a part of the value
(r'[ ]+', Name.Variable),
],
# single-quoted scalars
'single-quoted-scalar': [
# include whitespace and line break rules
include('quoted-scalar-whitespaces'),
# escaping of the quote character
(r'\'\'', String.Escape),
# regular non-whitespace characters
(r'[^ \t\n\r\f\v\']+', String),
# the closing quote
(r'\'', String, '#pop'),
],
# double-quoted scalars
'double-quoted-scalar': [
# include whitespace and line break rules
include('quoted-scalar-whitespaces'),
# escaping of special characters
            (r'\\[0abt\tn\nvfre "\\N_LP]', String.Escape),
# escape codes
(r'\\(?:x[0-9A-Fa-f]{2}|u[0-9A-Fa-f]{4}|U[0-9A-Fa-f]{8})',
String.Escape),
# regular non-whitespace characters
(r'[^ \t\n\r\f\v\"\\]+', String),
# the closing quote
(r'"', String, '#pop'),
],
# the beginning of a new line while scanning a plain scalar
'plain-scalar-in-block-context-new-line': [
# empty lines
(r'^[ ]+$', Text),
# line breaks
(r'\n+', Text),
# document start and document end indicators
(r'^(?=---|\.\.\.)', something(Name.Namespace), '#pop:3'),
# indentation spaces (we may leave the block line state here)
(r'^[ ]*', parse_plain_scalar_indent(Text), '#pop'),
],
# a plain scalar in the block context
'plain-scalar-in-block-context': [
# the scalar ends with the ':' indicator
(r'[ ]*(?=:[ ]|:$)', something(Text), '#pop'),
# the scalar ends with whitespaces followed by a comment
(r'[ ]+(?=#)', Text, '#pop'),
# trailing whitespaces are ignored
(r'[ ]+$', Text),
# line breaks are ignored
(r'\n+', Text, 'plain-scalar-in-block-context-new-line'),
# other whitespaces are a part of the value
(r'[ ]+', Literal.Scalar.Plain),
# regular non-whitespace characters
(r'(?::(?![ \t\n\r\f\v])|[^ \t\n\r\f\v:])+', Literal.Scalar.Plain),
],
    # a plain scalar in the flow context
'plain-scalar-in-flow-context': [
# the scalar ends with an indicator character
(r'[ ]*(?=[,:?\[\]{}])', something(Text), '#pop'),
# the scalar ends with a comment
(r'[ ]+(?=#)', Text, '#pop'),
# leading and trailing whitespaces are ignored
(r'^[ ]+', Text),
(r'[ ]+$', Text),
# line breaks are ignored
(r'\n+', Text),
# other whitespaces are a part of the value
(r'[ ]+', Name.Variable),
# regular non-whitespace characters
(r'[^ \t\n\r\f\v,:?\[\]{}]+', Name.Variable),
],
}
def get_tokens_unprocessed(self, text=None, context=None):
if context is None:
context = YamlLexerContext(text, 0)
return super(YamlLexer, self).get_tokens_unprocessed(text, context)
class LighttpdConfLexer(RegexLexer):
"""
Lexer for `Lighttpd <http://lighttpd.net/>`_ configuration files.
*New in Pygments 0.11.*
"""
name = 'Lighttpd configuration file'
aliases = ['lighty', 'lighttpd']
filenames = []
mimetypes = ['text/x-lighttpd-conf']
tokens = {
'root': [
(r'#.*\n', Comment.Single),
(r'/\S*', Name), # pathname
(r'[a-zA-Z._-]+', Keyword),
(r'\d+\.\d+\.\d+\.\d+(?:/\d+)?', Number),
(r'[0-9]+', Number),
(r'=>|=~|\+=|==|=|\+', Operator),
(r'\$[A-Z]+', Name.Builtin),
(r'[(){}\[\],]', Punctuation),
(r'"([^"\\]*(?:\\.[^"\\]*)*)"', String.Double),
(r'\s+', Text),
],
}
class NginxConfLexer(RegexLexer):
"""
Lexer for `Nginx <http://nginx.net/>`_ configuration files.
*New in Pygments 0.11.*
"""
name = 'Nginx configuration file'
aliases = ['nginx']
filenames = []
mimetypes = ['text/x-nginx-conf']
tokens = {
'root': [
(r'(include)(\s+)([^\s;]+)', bygroups(Keyword, Text, Name)),
(r'[^\s;#]+', Keyword, 'stmt'),
include('base'),
],
'block': [
(r'}', Punctuation, '#pop:2'),
(r'[^\s;#]+', Keyword.Namespace, 'stmt'),
include('base'),
],
'stmt': [
(r'{', Punctuation, 'block'),
(r';', Punctuation, '#pop'),
include('base'),
],
'base': [
(r'#.*\n', Comment.Single),
(r'on|off', Name.Constant),
(r'\$[^\s;#()]+', Name.Variable),
(r'([a-z0-9.-]+)(:)([0-9]+)',
bygroups(Name, Punctuation, Number.Integer)),
(r'[a-z-]+/[a-z-+]+', String), # mimetype
#(r'[a-zA-Z._-]+', Keyword),
(r'[0-9]+[km]?\b', Number.Integer),
(r'(~)(\s*)([^\s{]+)', bygroups(Punctuation, Text, String.Regex)),
(r'[:=~]', Punctuation),
(r'[^\s;#{}$]+', String), # catch all
(r'/[^\s;#]*', Name), # pathname
(r'\s+', Text),
(r'[$;]', Text), # leftover characters
],
}
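# A minimal usage sketch for the lexers in this module (illustrative only,
# assuming a standard Pygments install):
#
#   from pygments import highlight
#   from pygments.formatters import TerminalFormatter
#   print(highlight('server { listen 80; }', NginxConfLexer(), TerminalFormatter()))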
class CMakeLexer(RegexLexer):
"""
Lexer for `CMake <http://cmake.org/Wiki/CMake>`_ files.
*New in Pygments 1.2.*
"""
name = 'CMake'
aliases = ['cmake']
filenames = ['*.cmake', 'CMakeLists.txt']
mimetypes = ['text/x-cmake']
tokens = {
'root': [
#(r'(ADD_CUSTOM_COMMAND|ADD_CUSTOM_TARGET|ADD_DEFINITIONS|'
# r'ADD_DEPENDENCIES|ADD_EXECUTABLE|ADD_LIBRARY|ADD_SUBDIRECTORY|'
# r'ADD_TEST|AUX_SOURCE_DIRECTORY|BUILD_COMMAND|BUILD_NAME|'
# r'CMAKE_MINIMUM_REQUIRED|CONFIGURE_FILE|CREATE_TEST_SOURCELIST|'
# r'ELSE|ELSEIF|ENABLE_LANGUAGE|ENABLE_TESTING|ENDFOREACH|'
# r'ENDFUNCTION|ENDIF|ENDMACRO|ENDWHILE|EXEC_PROGRAM|'
# r'EXECUTE_PROCESS|EXPORT_LIBRARY_DEPENDENCIES|FILE|FIND_FILE|'
# r'FIND_LIBRARY|FIND_PACKAGE|FIND_PATH|FIND_PROGRAM|FLTK_WRAP_UI|'
# r'FOREACH|FUNCTION|GET_CMAKE_PROPERTY|GET_DIRECTORY_PROPERTY|'
# r'GET_FILENAME_COMPONENT|GET_SOURCE_FILE_PROPERTY|'
# r'GET_TARGET_PROPERTY|GET_TEST_PROPERTY|IF|INCLUDE|'
# r'INCLUDE_DIRECTORIES|INCLUDE_EXTERNAL_MSPROJECT|'
# r'INCLUDE_REGULAR_EXPRESSION|INSTALL|INSTALL_FILES|'
# r'INSTALL_PROGRAMS|INSTALL_TARGETS|LINK_DIRECTORIES|'
# r'LINK_LIBRARIES|LIST|LOAD_CACHE|LOAD_COMMAND|MACRO|'
# r'MAKE_DIRECTORY|MARK_AS_ADVANCED|MATH|MESSAGE|OPTION|'
# r'OUTPUT_REQUIRED_FILES|PROJECT|QT_WRAP_CPP|QT_WRAP_UI|REMOVE|'
# r'REMOVE_DEFINITIONS|SEPARATE_ARGUMENTS|SET|'
# r'SET_DIRECTORY_PROPERTIES|SET_SOURCE_FILES_PROPERTIES|'
# r'SET_TARGET_PROPERTIES|SET_TESTS_PROPERTIES|SITE_NAME|'
# r'SOURCE_GROUP|STRING|SUBDIR_DEPENDS|SUBDIRS|'
# r'TARGET_LINK_LIBRARIES|TRY_COMPILE|TRY_RUN|UNSET|'
# r'USE_MANGLED_MESA|UTILITY_SOURCE|VARIABLE_REQUIRES|'
# r'VTK_MAKE_INSTANTIATOR|VTK_WRAP_JAVA|VTK_WRAP_PYTHON|'
# r'VTK_WRAP_TCL|WHILE|WRITE_FILE|'
# r'COUNTARGS)\b', Name.Builtin, 'args'),
(r'\b([A-Za-z_]+)([ \t]*)(\()', bygroups(Name.Builtin, Text,
Punctuation), 'args'),
include('keywords'),
include('ws')
],
'args': [
(r'\(', Punctuation, '#push'),
(r'\)', Punctuation, '#pop'),
(r'(\${)(.+?)(})', bygroups(Operator, Name.Variable, Operator)),
(r'(?s)".*?"', String.Double),
(r'\\\S+', String),
(r'[^\)$"# \t\n]+', String),
(r'\n', Text), # explicitly legal
include('keywords'),
include('ws')
],
'string': [
],
'keywords': [
(r'\b(WIN32|UNIX|APPLE|CYGWIN|BORLAND|MINGW|MSVC|MSVC_IDE|MSVC60|'
r'MSVC70|MSVC71|MSVC80|MSVC90)\b', Keyword),
],
'ws': [
(r'[ \t]+', Text),
(r'#.+\n', Comment),
]
}
class HttpLexer(RegexLexer):
"""
Lexer for HTTP sessions.
*New in Pygments 1.5.*
"""
name = 'HTTP'
aliases = ['http']
flags = re.DOTALL
def header_callback(self, match):
if match.group(1).lower() == 'content-type':
content_type = match.group(5).strip()
if ';' in content_type:
content_type = content_type[:content_type.find(';')].strip()
self.content_type = content_type
yield match.start(1), Name.Attribute, match.group(1)
yield match.start(2), Text, match.group(2)
yield match.start(3), Operator, match.group(3)
yield match.start(4), Text, match.group(4)
yield match.start(5), Literal, match.group(5)
yield match.start(6), Text, match.group(6)
def continuous_header_callback(self, match):
yield match.start(1), Text, match.group(1)
yield match.start(2), Literal, match.group(2)
yield match.start(3), Text, match.group(3)
def content_callback(self, match):
content_type = getattr(self, 'content_type', None)
content = match.group()
offset = match.start()
if content_type:
from pygments.lexers import get_lexer_for_mimetype
try:
lexer = get_lexer_for_mimetype(content_type)
except ClassNotFound:
pass
else:
for idx, token, value in lexer.get_tokens_unprocessed(content):
yield offset + idx, token, value
return
yield offset, Text, content
tokens = {
'root': [
(r'(GET|POST|PUT|DELETE|HEAD|OPTIONS|TRACE)( +)([^ ]+)( +)'
r'(HTTPS?)(/)(1\.[01])(\r?\n|$)',
bygroups(Name.Function, Text, Name.Namespace, Text,
Keyword.Reserved, Operator, Number, Text),
'headers'),
(r'(HTTPS?)(/)(1\.[01])( +)(\d{3})( +)([^\r\n]+)(\r?\n|$)',
bygroups(Keyword.Reserved, Operator, Number, Text, Number,
Text, Name.Exception, Text),
'headers'),
],
'headers': [
(r'([^\s:]+)( *)(:)( *)([^\r\n]+)(\r?\n|$)', header_callback),
(r'([\t ]+)([^\r\n]+)(\r?\n|$)', continuous_header_callback),
(r'\r?\n', Text, 'content')
],
'content': [
(r'.+', content_callback)
]
}
class PyPyLogLexer(RegexLexer):
"""
Lexer for PyPy log files.
*New in Pygments 1.5.*
"""
name = "PyPy Log"
aliases = ["pypylog", "pypy"]
filenames = ["*.pypylog"]
mimetypes = ['application/x-pypylog']
tokens = {
"root": [
(r"\[\w+\] {jit-log-.*?$", Keyword, "jit-log"),
(r"\[\w+\] {jit-backend-counts$", Keyword, "jit-backend-counts"),
include("extra-stuff"),
],
"jit-log": [
(r"\[\w+\] jit-log-.*?}$", Keyword, "#pop"),
(r"^\+\d+: ", Comment),
(r"--end of the loop--", Comment),
(r"[ifp]\d+", Name),
(r"ptr\d+", Name),
(r"(\()(\w+(?:\.\w+)?)(\))",
bygroups(Punctuation, Name.Builtin, Punctuation)),
(r"[\[\]=,()]", Punctuation),
(r"(\d+\.\d+|inf|-inf)", Number.Float),
(r"-?\d+", Number.Integer),
(r"'.*'", String),
(r"(None|descr|ConstClass|ConstPtr|TargetToken)", Name),
(r"<.*?>+", Name.Builtin),
(r"(label|debug_merge_point|jump|finish)", Name.Class),
(r"(int_add_ovf|int_add|int_sub_ovf|int_sub|int_mul_ovf|int_mul|"
r"int_floordiv|int_mod|int_lshift|int_rshift|int_and|int_or|"
r"int_xor|int_eq|int_ne|int_ge|int_gt|int_le|int_lt|int_is_zero|"
r"int_is_true|"
r"uint_floordiv|uint_ge|uint_lt|"
r"float_add|float_sub|float_mul|float_truediv|float_neg|"
r"float_eq|float_ne|float_ge|float_gt|float_le|float_lt|float_abs|"
r"ptr_eq|ptr_ne|instance_ptr_eq|instance_ptr_ne|"
r"cast_int_to_float|cast_float_to_int|"
r"force_token|quasiimmut_field|same_as|virtual_ref_finish|"
r"virtual_ref|mark_opaque_ptr|"
r"call_may_force|call_assembler|call_loopinvariant|"
r"call_release_gil|call_pure|call|"
r"new_with_vtable|new_array|newstr|newunicode|new|"
r"arraylen_gc|"
r"getarrayitem_gc_pure|getarrayitem_gc|setarrayitem_gc|"
r"getarrayitem_raw|setarrayitem_raw|getfield_gc_pure|"
r"getfield_gc|getinteriorfield_gc|setinteriorfield_gc|"
r"getfield_raw|setfield_gc|setfield_raw|"
r"strgetitem|strsetitem|strlen|copystrcontent|"
r"unicodegetitem|unicodesetitem|unicodelen|"
r"guard_true|guard_false|guard_value|guard_isnull|"
r"guard_nonnull_class|guard_nonnull|guard_class|guard_no_overflow|"
r"guard_not_forced|guard_no_exception|guard_not_invalidated)",
Name.Builtin),
include("extra-stuff"),
],
"jit-backend-counts": [
(r"\[\w+\] jit-backend-counts}$", Keyword, "#pop"),
(r":", Punctuation),
(r"\d+", Number),
include("extra-stuff"),
],
"extra-stuff": [
(r"\s+", Text),
(r"#.*?$", Comment),
],
}
class HxmlLexer(RegexLexer):
"""
Lexer for `haXe build <http://haxe.org/doc/compiler>`_ files.
*New in Pygments 1.6.*
"""
name = 'Hxml'
aliases = ['haxeml', 'hxml']
filenames = ['*.hxml']
tokens = {
'root': [
            # Separator
(r'(--)(next)', bygroups(Punctuation, Generic.Heading)),
# Compiler switches with one dash
            (r'(-)(prompt|debug|v)', bygroups(Punctuation, Keyword)),
            # Compiler switches with two dashes
(r'(--)(neko-source|flash-strict|flash-use-stage|no-opt|no-traces|'
r'no-inline|times|no-output)', bygroups(Punctuation, Keyword)),
# Targets and other options that take an argument
(r'(-)(cpp|js|neko|x|as3|swf9?|swf-lib|php|xml|main|lib|D|resource|'
r'cp|cmd)( +)(.+)',
bygroups(Punctuation, Keyword, Whitespace, String)),
# Options that take only numerical arguments
            (r'(-)(swf-version)( +)(\d+)',
             bygroups(Punctuation, Keyword, Whitespace, Number.Integer)),
            # An option that defines the size, the fps and the background
            # color of a flash movie
(r'(-)(swf-header)( +)(\d+)(:)(\d+)(:)(\d+)(:)([A-Fa-f0-9]{6})',
bygroups(Punctuation, Keyword, Whitespace, Number.Integer,
Punctuation, Number.Integer, Punctuation, Number.Integer,
Punctuation, Number.Hex)),
# options with two dashes that takes arguments
(r'(--)(js-namespace|php-front|php-lib|remap|gen-hx-classes)( +)'
r'(.+)', bygroups(Punctuation, Keyword, Whitespace, String)),
# Single line comment, multiline ones are not allowed.
(r'#.*', Comment.Single)
]
}
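# Lexers defined here are normally resolved through the Pygments registry
# rather than imported directly; an illustrative lookup (assuming this module
# is registered in pygments.lexers._mapping):
#
#   from pygments.lexers import get_lexer_by_name
#   lexer = get_lexer_by_name('hxml')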
| mit |
konkasoftci/azure-linux-automation | remote-scripts/start-server.py | 8 | 1730 | #!/usr/bin/python
##########################################
#THIS SCRIPT ACCEPTS SOME SERVER PARAMETERS.
#PLEASE RUN THE SCRIPT WITH -h OR --help FOR MORE DETAILS.
##########################################
from azuremodules import *
import argparse
import sys
#for error checking
parser = argparse.ArgumentParser()
parser.add_argument('-u', '--udp', help='switch: start the server in UDP listening mode.', choices=['yes', 'no'] )
parser.add_argument('-p', '--port', help='specifies which port should be used', required=True, type= int)
#parser.add_argument('-m', '--maxsegdisplay', help='Maximum Segment Size display ', choices=['yes', 'no'])
#parser.add_argument('-M', '--maxsegset', help='Maximum Segment Size Settings', type = int)
parser.add_argument('-m', '--mss_print', help='Maximum Segment Size display ', choices=['yes', 'no'])
parser.add_argument('-M', '--mss', help='Maximum Segment Size Settings', type = int)
parser.add_argument('-i', '--interval', help='specifies how often, in seconds, output is displayed on screen', type= int)
args = parser.parse_args()
#build the iperf server command from the parsed arguments
command = 'iperf -s' + ' -p' + str(args.port) + ' -f K'
if args.interval != None :
command = command + ' -i' + str(args.interval)
if args.udp == 'yes':
command = command + ' -u'
if args.mss != None:
command = command + ' -M' + str(args.mss)
if args.mss_print == 'yes':
command = command + ' -m'
#finalCommand = 'nohup ' + command + ' >> iperf-server.txt &'
finalCommand = command + ' >> iperf-server.txt'
server = finalCommand
print(server)
#Run('echo "TestStarted" > iperf-server.txt')
StopServer()
StartServer(server)
#Run('echo "TestCompleted" >> iperf-server.txt')
| apache-2.0 |
abenzbiria/clients_odoo | addons/hr_payroll/wizard/hr_payroll_contribution_register_report.py | 337 | 2074 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from datetime import datetime
from dateutil import relativedelta
from openerp.osv import fields, osv
class payslip_lines_contribution_register(osv.osv_memory):
_name = 'payslip.lines.contribution.register'
_description = 'PaySlip Lines by Contribution Registers'
_columns = {
'date_from': fields.date('Date From', required=True),
'date_to': fields.date('Date To', required=True),
}
_defaults = {
'date_from': lambda *a: time.strftime('%Y-%m-01'),
'date_to': lambda *a: str(datetime.now() + relativedelta.relativedelta(months=+1, day=1, days=-1))[:10],
}
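    # Note: the 'date_to' default resolves to the last day of the current
    # month: advance one month, snap to day 1, then step back one day.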
def print_report(self, cr, uid, ids, context=None):
datas = {
'ids': context.get('active_ids', []),
'model': 'hr.contribution.register',
'form': self.read(cr, uid, ids, context=context)[0]
}
return self.pool['report'].get_action(
cr, uid, [], 'hr_payroll.report_contributionregister', data=datas, context=context
)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
eHealthAfrica/formhub | formhub/preset/mysql_test.py | 2 | 1111 | # this system uses structured settings.py as defined in http://www.slideshare.net/jacobian/the-best-and-worst-of-django
#
# this example third-level staging file overrides some definitions in staging_example.py
# so that it returns the same definitions as the former localsettings.py.examples in the formhub distribution.
#
try:
from staging import * # get most settings from staging.py (which in turn, imports from settings.py)
except ImportError:
import sys, django
django.utils.six.reraise(RuntimeError, *sys.exc_info()[1:]) # use RuntimeError to extend the traceback
except:
raise
# # # now override the settings which came from staging # # # #
# choose a different database...
# mysql
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'test',
'USER': 'adotest',
'PASSWORD': '12345678', # in production, use something like: os.environ['MY_DB_PASSWORD']
'HOST': 'cdc-staging.eocng.org'
}
}
# Make a unique key just for testing, and don't share it with anybody.
SECRET_KEY = 'mlfs33^s1l4xf6a36$0#j%dd*sisfoi&)&4s-v=91#^l01v)*j'
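# Illustrative hardening sketch (not part of the original file; assumes
# MY_DB_PASSWORD is exported in the environment, as the comment above suggests):
# import os
# DATABASES['default']['PASSWORD'] = os.environ['MY_DB_PASSWORD']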
| bsd-2-clause |
total-impact/depsy | providers/github.py | 5 | 1068 | import requests
from requests.auth import HTTPBasicAuth
import os
import logging
logger = logging.getLogger("github")
# generated the "OAuth Personal Access Token" token here: https://github.com/settings/tokens/new
user = os.environ["GITHUB_OAUTH_USERNAME"]
password = os.environ["GITHUB_OAUTH_ACCESS_TOKEN"]
users_url_template = "https://api.github.com/users/%s"
all_repos_url_template = "https://api.github.com/users/%s/repos?per_page=100"
repos_url_template = "https://api.github.com/repos/%s/%s?per_page=100"
def get_profile_data(username):
users_url = users_url_template % username
profile_data = requests.get(users_url, auth=(user, password))
return profile_data.json()
def get_all_repo_data(username):
all_repos_url = all_repos_url_template % username
repo_data = requests.get(all_repos_url, auth=(user, password))
return repo_data.json()
def get_repo_data(username, reponame):
repos_url = repos_url_template % (username, reponame)
repo_data = requests.get(repos_url, auth=(user, password))
return repo_data.json()
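# Illustrative usage (assumes GITHUB_OAUTH_USERNAME and
# GITHUB_OAUTH_ACCESS_TOKEN are set, as required at import time above):
#   profile = get_profile_data("octocat")
#   print(profile.get("public_repos"))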
| mit |
emedinaa/contentbox | third_party/unidecode/x02c.py | 4 | 3853 | data = (
'', # 0x00
'', # 0x01
'', # 0x02
'', # 0x03
'', # 0x04
'', # 0x05
'', # 0x06
'', # 0x07
'', # 0x08
'', # 0x09
'', # 0x0a
'', # 0x0b
'', # 0x0c
'', # 0x0d
'', # 0x0e
'', # 0x0f
'', # 0x10
'', # 0x11
'', # 0x12
'', # 0x13
'', # 0x14
'', # 0x15
'', # 0x16
'', # 0x17
'', # 0x18
'', # 0x19
'', # 0x1a
'', # 0x1b
'', # 0x1c
'', # 0x1d
'', # 0x1e
'', # 0x1f
'', # 0x20
'', # 0x21
'', # 0x22
'', # 0x23
'', # 0x24
'', # 0x25
'', # 0x26
'', # 0x27
'', # 0x28
'', # 0x29
'', # 0x2a
'', # 0x2b
'', # 0x2c
'', # 0x2d
'', # 0x2e
'', # 0x2f
'', # 0x30
'', # 0x31
'', # 0x32
'', # 0x33
'', # 0x34
'', # 0x35
'', # 0x36
'', # 0x37
'', # 0x38
'', # 0x39
'', # 0x3a
'', # 0x3b
'', # 0x3c
'', # 0x3d
'', # 0x3e
'', # 0x3f
'', # 0x40
'', # 0x41
'', # 0x42
'', # 0x43
'', # 0x44
'', # 0x45
'', # 0x46
'', # 0x47
'', # 0x48
'', # 0x49
'', # 0x4a
'', # 0x4b
'', # 0x4c
'', # 0x4d
'', # 0x4e
'', # 0x4f
'', # 0x50
'', # 0x51
'', # 0x52
'', # 0x53
'', # 0x54
'', # 0x55
'', # 0x56
'', # 0x57
'', # 0x58
'', # 0x59
'', # 0x5a
'', # 0x5b
'', # 0x5c
'', # 0x5d
'', # 0x5e
'', # 0x5f
'L', # 0x60
'l', # 0x61
'L', # 0x62
'P', # 0x63
'R', # 0x64
'a', # 0x65
't', # 0x66
'H', # 0x67
'h', # 0x68
'K', # 0x69
'k', # 0x6a
'Z', # 0x6b
'z', # 0x6c
'', # 0x6d
'M', # 0x6e
'A', # 0x6f
'', # 0x70
'', # 0x71
'', # 0x72
'', # 0x73
'', # 0x74
'', # 0x75
'', # 0x76
'', # 0x77
'', # 0x78
'', # 0x79
'', # 0x7a
'', # 0x7b
'', # 0x7c
'', # 0x7d
'', # 0x7e
'', # 0x7f
'', # 0x80
'', # 0x81
'', # 0x82
'', # 0x83
'', # 0x84
'', # 0x85
'', # 0x86
'', # 0x87
'', # 0x88
'', # 0x89
'', # 0x8a
'', # 0x8b
'', # 0x8c
'', # 0x8d
'', # 0x8e
'', # 0x8f
'', # 0x90
'', # 0x91
'', # 0x92
'', # 0x93
'', # 0x94
'', # 0x95
'', # 0x96
'', # 0x97
'', # 0x98
'', # 0x99
'', # 0x9a
'', # 0x9b
'', # 0x9c
'', # 0x9d
'', # 0x9e
'', # 0x9f
'', # 0xa0
'', # 0xa1
'', # 0xa2
'', # 0xa3
'', # 0xa4
'', # 0xa5
'', # 0xa6
'', # 0xa7
'', # 0xa8
'', # 0xa9
'', # 0xaa
'', # 0xab
'', # 0xac
'', # 0xad
'', # 0xae
'', # 0xaf
'', # 0xb0
'', # 0xb1
'', # 0xb2
'', # 0xb3
'', # 0xb4
'', # 0xb5
'', # 0xb6
'', # 0xb7
'', # 0xb8
'', # 0xb9
'', # 0xba
'', # 0xbb
'', # 0xbc
'', # 0xbd
'', # 0xbe
'', # 0xbf
'', # 0xc0
'', # 0xc1
'', # 0xc2
'', # 0xc3
'', # 0xc4
'', # 0xc5
'', # 0xc6
'', # 0xc7
'', # 0xc8
'', # 0xc9
'', # 0xca
'', # 0xcb
'', # 0xcc
'', # 0xcd
'', # 0xce
'', # 0xcf
'', # 0xd0
'', # 0xd1
'', # 0xd2
'', # 0xd3
'', # 0xd4
'', # 0xd5
'', # 0xd6
'', # 0xd7
'', # 0xd8
'', # 0xd9
'', # 0xda
'', # 0xdb
'', # 0xdc
'', # 0xdd
'', # 0xde
'', # 0xdf
'', # 0xe0
'', # 0xe1
'', # 0xe2
'', # 0xe3
'', # 0xe4
'', # 0xe5
'', # 0xe6
'', # 0xe7
'', # 0xe8
'', # 0xe9
'', # 0xea
'', # 0xeb
'', # 0xec
'', # 0xed
'', # 0xee
'', # 0xef
'', # 0xf0
'', # 0xf1
'', # 0xf2
'', # 0xf3
'', # 0xf4
'', # 0xf5
'', # 0xf6
'', # 0xf7
'', # 0xf8
'', # 0xf9
'', # 0xfa
'', # 0xfb
'', # 0xfc
'', # 0xfd
'', # 0xfe
)
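# Illustrative lookup (assumption: this table backs transliteration for the
# U+2C00-U+2CFF block, indexed by the low byte of the codepoint):
#   data[ord(u'\u2c62') - 0x2c00]  # -> 'L'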
| apache-2.0 |
nmartensen/pandas | asv_bench/benchmarks/categoricals.py | 3 | 2803 | from .pandas_vb_common import *
try:
from pandas.api.types import union_categoricals
except ImportError:
try:
from pandas.types.concat import union_categoricals
except ImportError:
pass
class Categoricals(object):
goal_time = 0.2
def setup(self):
N = 100000
self.s = pd.Series((list('aabbcd') * N)).astype('category')
self.a = pd.Categorical((list('aabbcd') * N))
self.b = pd.Categorical((list('bbcdjk') * N))
self.categories = list('abcde')
self.cat_idx = Index(self.categories)
self.values = np.tile(self.categories, N)
self.codes = np.tile(range(len(self.categories)), N)
self.datetimes = pd.Series(pd.date_range(
'1995-01-01 00:00:00', periods=10000, freq='s'))
def time_concat(self):
concat([self.s, self.s])
def time_union(self):
union_categoricals([self.a, self.b])
def time_constructor_regular(self):
Categorical(self.values, self.categories)
def time_constructor_fastpath(self):
Categorical(self.codes, self.cat_idx, fastpath=True)
def time_constructor_datetimes(self):
Categorical(self.datetimes)
def time_constructor_datetimes_with_nat(self):
t = self.datetimes
t.iloc[-1] = pd.NaT
Categorical(t)
class Categoricals2(object):
goal_time = 0.2
def setup(self):
n = 500000
np.random.seed(2718281)
arr = ['s%04d' % i for i in np.random.randint(0, n // 10, size=n)]
self.ts = Series(arr).astype('category')
self.sel = self.ts.loc[[0]]
def time_value_counts(self):
self.ts.value_counts(dropna=False)
def time_value_counts_dropna(self):
self.ts.value_counts(dropna=True)
def time_rendering(self):
str(self.sel)
def time_set_categories(self):
self.ts.cat.set_categories(self.ts.cat.categories[::2])
class Categoricals3(object):
goal_time = 0.2
def setup(self):
N = 100000
ncats = 100
self.s1 = Series(np.array(tm.makeCategoricalIndex(N, ncats)))
self.s1_cat = self.s1.astype('category')
self.s1_cat_ordered = self.s1.astype('category', ordered=True)
self.s2 = Series(np.random.randint(0, ncats, size=N))
self.s2_cat = self.s2.astype('category')
self.s2_cat_ordered = self.s2.astype('category', ordered=True)
def time_rank_string(self):
self.s1.rank()
def time_rank_string_cat(self):
self.s1_cat.rank()
def time_rank_string_cat_ordered(self):
self.s1_cat_ordered.rank()
def time_rank_int(self):
self.s2.rank()
def time_rank_int_cat(self):
self.s2_cat.rank()
def time_rank_int_cat_ordered(self):
self.s2_cat_ordered.rank()
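# These classes follow the airspeed velocity (asv) convention: methods named
# time_* are discovered and timed automatically. Illustrative invocation from
# the benchmark directory (assumes asv is installed):
#   asv run --bench categoricals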
| bsd-3-clause |
heeraj123/oh-mainline | vendor/packages/twisted/twisted/conch/test/test_helper.py | 17 | 18248 | # -*- test-case-name: twisted.conch.test.test_helper -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
from twisted.conch.insults import helper
from twisted.conch.insults.insults import G0, G1, G2, G3
from twisted.conch.insults.insults import modes, privateModes
from twisted.conch.insults.insults import NORMAL, BOLD, UNDERLINE, BLINK, REVERSE_VIDEO
from twisted.trial import unittest
WIDTH = 80
HEIGHT = 24
class BufferTestCase(unittest.TestCase):
def setUp(self):
self.term = helper.TerminalBuffer()
self.term.connectionMade()
def testInitialState(self):
self.assertEquals(self.term.width, WIDTH)
self.assertEquals(self.term.height, HEIGHT)
self.assertEquals(str(self.term),
'\n' * (HEIGHT - 1))
self.assertEquals(self.term.reportCursorPosition(), (0, 0))
def test_initialPrivateModes(self):
"""
Verify that only DEC Auto Wrap Mode (DECAWM) and DEC Text Cursor Enable
Mode (DECTCEM) are initially in the Set Mode (SM) state.
"""
self.assertEqual(
{privateModes.AUTO_WRAP: True,
privateModes.CURSOR_MODE: True},
self.term.privateModes)
def test_carriageReturn(self):
"""
C{"\r"} moves the cursor to the first column in the current row.
"""
self.term.cursorForward(5)
self.term.cursorDown(3)
self.assertEqual(self.term.reportCursorPosition(), (5, 3))
self.term.insertAtCursor("\r")
self.assertEqual(self.term.reportCursorPosition(), (0, 3))
def test_linefeed(self):
"""
C{"\n"} moves the cursor to the next row without changing the column.
"""
self.term.cursorForward(5)
self.assertEqual(self.term.reportCursorPosition(), (5, 0))
self.term.insertAtCursor("\n")
self.assertEqual(self.term.reportCursorPosition(), (5, 1))
def test_newline(self):
"""
C{write} transforms C{"\n"} into C{"\r\n"}.
"""
self.term.cursorForward(5)
self.term.cursorDown(3)
self.assertEqual(self.term.reportCursorPosition(), (5, 3))
self.term.write("\n")
self.assertEqual(self.term.reportCursorPosition(), (0, 4))
def test_setPrivateModes(self):
"""
Verify that L{helper.TerminalBuffer.setPrivateModes} changes the Set
Mode (SM) state to "set" for the private modes it is passed.
"""
expected = self.term.privateModes.copy()
self.term.setPrivateModes([privateModes.SCROLL, privateModes.SCREEN])
expected[privateModes.SCROLL] = True
expected[privateModes.SCREEN] = True
self.assertEqual(expected, self.term.privateModes)
def test_resetPrivateModes(self):
"""
Verify that L{helper.TerminalBuffer.resetPrivateModes} changes the Set
Mode (SM) state to "reset" for the private modes it is passed.
"""
expected = self.term.privateModes.copy()
self.term.resetPrivateModes([privateModes.AUTO_WRAP, privateModes.CURSOR_MODE])
del expected[privateModes.AUTO_WRAP]
del expected[privateModes.CURSOR_MODE]
self.assertEqual(expected, self.term.privateModes)
def testCursorDown(self):
self.term.cursorDown(3)
self.assertEquals(self.term.reportCursorPosition(), (0, 3))
self.term.cursorDown()
self.assertEquals(self.term.reportCursorPosition(), (0, 4))
self.term.cursorDown(HEIGHT)
self.assertEquals(self.term.reportCursorPosition(), (0, HEIGHT - 1))
def testCursorUp(self):
self.term.cursorUp(5)
self.assertEquals(self.term.reportCursorPosition(), (0, 0))
self.term.cursorDown(20)
self.term.cursorUp(1)
self.assertEquals(self.term.reportCursorPosition(), (0, 19))
self.term.cursorUp(19)
self.assertEquals(self.term.reportCursorPosition(), (0, 0))
def testCursorForward(self):
self.term.cursorForward(2)
self.assertEquals(self.term.reportCursorPosition(), (2, 0))
self.term.cursorForward(2)
self.assertEquals(self.term.reportCursorPosition(), (4, 0))
self.term.cursorForward(WIDTH)
self.assertEquals(self.term.reportCursorPosition(), (WIDTH, 0))
def testCursorBackward(self):
self.term.cursorForward(10)
self.term.cursorBackward(2)
self.assertEquals(self.term.reportCursorPosition(), (8, 0))
self.term.cursorBackward(7)
self.assertEquals(self.term.reportCursorPosition(), (1, 0))
self.term.cursorBackward(1)
self.assertEquals(self.term.reportCursorPosition(), (0, 0))
self.term.cursorBackward(1)
self.assertEquals(self.term.reportCursorPosition(), (0, 0))
def testCursorPositioning(self):
self.term.cursorPosition(3, 9)
self.assertEquals(self.term.reportCursorPosition(), (3, 9))
def testSimpleWriting(self):
s = "Hello, world."
self.term.write(s)
self.assertEquals(
str(self.term),
s + '\n' +
'\n' * (HEIGHT - 2))
def testOvertype(self):
s = "hello, world."
self.term.write(s)
self.term.cursorBackward(len(s))
self.term.resetModes([modes.IRM])
self.term.write("H")
self.assertEquals(
str(self.term),
("H" + s[1:]) + '\n' +
'\n' * (HEIGHT - 2))
def testInsert(self):
s = "ello, world."
self.term.write(s)
self.term.cursorBackward(len(s))
self.term.setModes([modes.IRM])
self.term.write("H")
self.assertEquals(
str(self.term),
("H" + s) + '\n' +
'\n' * (HEIGHT - 2))
def testWritingInTheMiddle(self):
s = "Hello, world."
self.term.cursorDown(5)
self.term.cursorForward(5)
self.term.write(s)
self.assertEquals(
str(self.term),
'\n' * 5 +
(self.term.fill * 5) + s + '\n' +
'\n' * (HEIGHT - 7))
def testWritingWrappedAtEndOfLine(self):
s = "Hello, world."
self.term.cursorForward(WIDTH - 5)
self.term.write(s)
self.assertEquals(
str(self.term),
s[:5].rjust(WIDTH) + '\n' +
s[5:] + '\n' +
'\n' * (HEIGHT - 3))
def testIndex(self):
self.term.index()
self.assertEquals(self.term.reportCursorPosition(), (0, 1))
self.term.cursorDown(HEIGHT)
self.assertEquals(self.term.reportCursorPosition(), (0, HEIGHT - 1))
self.term.index()
self.assertEquals(self.term.reportCursorPosition(), (0, HEIGHT - 1))
def testReverseIndex(self):
self.term.reverseIndex()
self.assertEquals(self.term.reportCursorPosition(), (0, 0))
self.term.cursorDown(2)
self.assertEquals(self.term.reportCursorPosition(), (0, 2))
self.term.reverseIndex()
self.assertEquals(self.term.reportCursorPosition(), (0, 1))
def test_nextLine(self):
"""
C{nextLine} positions the cursor at the beginning of the row below the
current row.
"""
self.term.nextLine()
self.assertEquals(self.term.reportCursorPosition(), (0, 1))
self.term.cursorForward(5)
self.assertEquals(self.term.reportCursorPosition(), (5, 1))
self.term.nextLine()
self.assertEquals(self.term.reportCursorPosition(), (0, 2))
def testSaveCursor(self):
self.term.cursorDown(5)
self.term.cursorForward(7)
self.assertEquals(self.term.reportCursorPosition(), (7, 5))
self.term.saveCursor()
self.term.cursorDown(7)
self.term.cursorBackward(3)
self.assertEquals(self.term.reportCursorPosition(), (4, 12))
self.term.restoreCursor()
self.assertEquals(self.term.reportCursorPosition(), (7, 5))
def testSingleShifts(self):
self.term.singleShift2()
self.term.write('Hi')
ch = self.term.getCharacter(0, 0)
self.assertEquals(ch[0], 'H')
self.assertEquals(ch[1].charset, G2)
ch = self.term.getCharacter(1, 0)
self.assertEquals(ch[0], 'i')
self.assertEquals(ch[1].charset, G0)
self.term.singleShift3()
self.term.write('!!')
ch = self.term.getCharacter(2, 0)
self.assertEquals(ch[0], '!')
self.assertEquals(ch[1].charset, G3)
ch = self.term.getCharacter(3, 0)
self.assertEquals(ch[0], '!')
self.assertEquals(ch[1].charset, G0)
def testShifting(self):
s1 = "Hello"
s2 = "World"
s3 = "Bye!"
self.term.write("Hello\n")
self.term.shiftOut()
self.term.write("World\n")
self.term.shiftIn()
self.term.write("Bye!\n")
g = G0
h = 0
for s in (s1, s2, s3):
for i in range(len(s)):
ch = self.term.getCharacter(i, h)
self.assertEquals(ch[0], s[i])
self.assertEquals(ch[1].charset, g)
g = g == G0 and G1 or G0
h += 1
def testGraphicRendition(self):
self.term.selectGraphicRendition(BOLD, UNDERLINE, BLINK, REVERSE_VIDEO)
self.term.write('W')
self.term.selectGraphicRendition(NORMAL)
self.term.write('X')
self.term.selectGraphicRendition(BLINK)
self.term.write('Y')
self.term.selectGraphicRendition(BOLD)
self.term.write('Z')
ch = self.term.getCharacter(0, 0)
self.assertEquals(ch[0], 'W')
self.failUnless(ch[1].bold)
self.failUnless(ch[1].underline)
self.failUnless(ch[1].blink)
self.failUnless(ch[1].reverseVideo)
ch = self.term.getCharacter(1, 0)
self.assertEquals(ch[0], 'X')
self.failIf(ch[1].bold)
self.failIf(ch[1].underline)
self.failIf(ch[1].blink)
self.failIf(ch[1].reverseVideo)
ch = self.term.getCharacter(2, 0)
self.assertEquals(ch[0], 'Y')
self.failUnless(ch[1].blink)
self.failIf(ch[1].bold)
self.failIf(ch[1].underline)
self.failIf(ch[1].reverseVideo)
ch = self.term.getCharacter(3, 0)
self.assertEquals(ch[0], 'Z')
self.failUnless(ch[1].blink)
self.failUnless(ch[1].bold)
self.failIf(ch[1].underline)
self.failIf(ch[1].reverseVideo)
def testColorAttributes(self):
s1 = "Merry xmas"
s2 = "Just kidding"
self.term.selectGraphicRendition(helper.FOREGROUND + helper.RED,
helper.BACKGROUND + helper.GREEN)
self.term.write(s1 + "\n")
self.term.selectGraphicRendition(NORMAL)
self.term.write(s2 + "\n")
for i in range(len(s1)):
ch = self.term.getCharacter(i, 0)
self.assertEquals(ch[0], s1[i])
self.assertEquals(ch[1].charset, G0)
self.assertEquals(ch[1].bold, False)
self.assertEquals(ch[1].underline, False)
self.assertEquals(ch[1].blink, False)
self.assertEquals(ch[1].reverseVideo, False)
self.assertEquals(ch[1].foreground, helper.RED)
self.assertEquals(ch[1].background, helper.GREEN)
for i in range(len(s2)):
ch = self.term.getCharacter(i, 1)
self.assertEquals(ch[0], s2[i])
self.assertEquals(ch[1].charset, G0)
self.assertEquals(ch[1].bold, False)
self.assertEquals(ch[1].underline, False)
self.assertEquals(ch[1].blink, False)
self.assertEquals(ch[1].reverseVideo, False)
self.assertEquals(ch[1].foreground, helper.WHITE)
self.assertEquals(ch[1].background, helper.BLACK)
def testEraseLine(self):
s1 = 'line 1'
s2 = 'line 2'
s3 = 'line 3'
self.term.write('\n'.join((s1, s2, s3)) + '\n')
self.term.cursorPosition(1, 1)
self.term.eraseLine()
self.assertEquals(
str(self.term),
s1 + '\n' +
'\n' +
s3 + '\n' +
'\n' * (HEIGHT - 4))
def testEraseToLineEnd(self):
s = 'Hello, world.'
self.term.write(s)
self.term.cursorBackward(5)
self.term.eraseToLineEnd()
self.assertEquals(
str(self.term),
s[:-5] + '\n' +
'\n' * (HEIGHT - 2))
def testEraseToLineBeginning(self):
s = 'Hello, world.'
self.term.write(s)
self.term.cursorBackward(5)
self.term.eraseToLineBeginning()
self.assertEquals(
str(self.term),
s[-4:].rjust(len(s)) + '\n' +
'\n' * (HEIGHT - 2))
def testEraseDisplay(self):
self.term.write('Hello world\n')
self.term.write('Goodbye world\n')
self.term.eraseDisplay()
self.assertEquals(
str(self.term),
'\n' * (HEIGHT - 1))
def testEraseToDisplayEnd(self):
s1 = "Hello world"
s2 = "Goodbye world"
self.term.write('\n'.join((s1, s2, '')))
self.term.cursorPosition(5, 1)
self.term.eraseToDisplayEnd()
self.assertEquals(
str(self.term),
s1 + '\n' +
s2[:5] + '\n' +
'\n' * (HEIGHT - 3))
def testEraseToDisplayBeginning(self):
s1 = "Hello world"
s2 = "Goodbye world"
self.term.write('\n'.join((s1, s2)))
self.term.cursorPosition(5, 1)
self.term.eraseToDisplayBeginning()
self.assertEquals(
str(self.term),
'\n' +
s2[6:].rjust(len(s2)) + '\n' +
'\n' * (HEIGHT - 3))
def testLineInsertion(self):
s1 = "Hello world"
s2 = "Goodbye world"
self.term.write('\n'.join((s1, s2)))
self.term.cursorPosition(7, 1)
self.term.insertLine()
self.assertEquals(
str(self.term),
s1 + '\n' +
'\n' +
s2 + '\n' +
'\n' * (HEIGHT - 4))
def testLineDeletion(self):
s1 = "Hello world"
s2 = "Middle words"
s3 = "Goodbye world"
self.term.write('\n'.join((s1, s2, s3)))
self.term.cursorPosition(9, 1)
self.term.deleteLine()
self.assertEquals(
str(self.term),
s1 + '\n' +
s3 + '\n' +
'\n' * (HEIGHT - 3))
class FakeDelayedCall:
called = False
cancelled = False
def __init__(self, fs, timeout, f, a, kw):
self.fs = fs
self.timeout = timeout
self.f = f
self.a = a
self.kw = kw
def active(self):
return not (self.cancelled or self.called)
def cancel(self):
self.cancelled = True
# self.fs.calls.remove(self)
def call(self):
self.called = True
self.f(*self.a, **self.kw)
class FakeScheduler:
def __init__(self):
self.calls = []
def callLater(self, timeout, f, *a, **kw):
self.calls.append(FakeDelayedCall(self, timeout, f, a, kw))
return self.calls[-1]
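# FakeScheduler stands in for the reactor in ExpectTestCase below: expect()
# registers its timeout through callLater(), and the tests fire or inspect the
# recorded FakeDelayedCall objects synchronously via fs.calls[n].call().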
class ExpectTestCase(unittest.TestCase):
def setUp(self):
self.term = helper.ExpectableBuffer()
self.term.connectionMade()
self.fs = FakeScheduler()
def testSimpleString(self):
result = []
d = self.term.expect("hello world", timeout=1, scheduler=self.fs)
d.addCallback(result.append)
self.term.write("greeting puny earthlings\n")
self.failIf(result)
self.term.write("hello world\n")
self.failUnless(result)
self.assertEquals(result[0].group(), "hello world")
self.assertEquals(len(self.fs.calls), 1)
self.failIf(self.fs.calls[0].active())
def testBrokenUpString(self):
result = []
d = self.term.expect("hello world")
d.addCallback(result.append)
self.failIf(result)
self.term.write("hello ")
self.failIf(result)
self.term.write("worl")
self.failIf(result)
self.term.write("d")
self.failUnless(result)
self.assertEquals(result[0].group(), "hello world")
def testMultiple(self):
result = []
d1 = self.term.expect("hello ")
d1.addCallback(result.append)
d2 = self.term.expect("world")
d2.addCallback(result.append)
self.failIf(result)
self.term.write("hello")
self.failIf(result)
self.term.write(" ")
self.assertEquals(len(result), 1)
self.term.write("world")
self.assertEquals(len(result), 2)
self.assertEquals(result[0].group(), "hello ")
self.assertEquals(result[1].group(), "world")
def testSynchronous(self):
self.term.write("hello world")
result = []
d = self.term.expect("hello world")
d.addCallback(result.append)
self.failUnless(result)
self.assertEquals(result[0].group(), "hello world")
def testMultipleSynchronous(self):
self.term.write("goodbye world")
result = []
d1 = self.term.expect("bye")
d1.addCallback(result.append)
d2 = self.term.expect("world")
d2.addCallback(result.append)
self.assertEquals(len(result), 2)
self.assertEquals(result[0].group(), "bye")
self.assertEquals(result[1].group(), "world")
def _cbTestTimeoutFailure(self, res):
self.assert_(hasattr(res, 'type'))
self.assertEqual(res.type, helper.ExpectationTimeout)
def testTimeoutFailure(self):
d = self.term.expect("hello world", timeout=1, scheduler=self.fs)
d.addBoth(self._cbTestTimeoutFailure)
self.fs.calls[0].call()
def testOverlappingTimeout(self):
self.term.write("not zoomtastic")
result = []
d1 = self.term.expect("hello world", timeout=1, scheduler=self.fs)
d1.addBoth(self._cbTestTimeoutFailure)
d2 = self.term.expect("zoom")
d2.addCallback(result.append)
self.fs.calls[0].call()
self.assertEquals(len(result), 1)
self.assertEquals(result[0].group(), "zoom")
| agpl-3.0 |
google-research/federated | utils/models/resnet_models.py | 1 | 11325 | # Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""ResNet v2 model for Keras using Batch or Group Normalization.
Related papers/blogs:
- http://arxiv.org/pdf/1603.05027v2.pdf
"""
import tensorflow as tf
import tensorflow_addons.layers.normalizations as tfa_norms
BATCH_NORM_DECAY = 0.997
BATCH_NORM_EPSILON = 1e-5
L2_WEIGHT_DECAY = 1e-4
def _norm_relu(input_tensor, norm='group'):
"""Helper function to make a Norm -> ReLU block."""
if tf.keras.backend.image_data_format() == 'channels_last':
channel_axis = 3
else:
channel_axis = 1
if norm == 'group':
x = tfa_norms.GroupNormalization(axis=channel_axis)(input_tensor)
else:
x = tf.keras.layers.BatchNormalization(
axis=channel_axis,
momentum=BATCH_NORM_DECAY,
epsilon=BATCH_NORM_EPSILON)(input_tensor)
return tf.keras.layers.Activation('relu')(x)
def _conv_norm_relu(input_tensor,
filters,
kernel_size,
strides=(1, 1),
norm='group',
seed=0):
"""Helper function to make a Conv -> Norm -> ReLU block."""
x = tf.keras.layers.Conv2D(
filters,
kernel_size,
strides=strides,
padding='same',
use_bias=False,
kernel_initializer=tf.keras.initializers.HeNormal(seed=seed),
kernel_regularizer=tf.keras.regularizers.l2(L2_WEIGHT_DECAY))(
input_tensor)
return _norm_relu(x, norm=norm)
def _norm_relu_conv(input_tensor,
filters,
kernel_size,
strides=(1, 1),
norm='group',
seed=0):
"""Helper function to make a Norm -> ReLU -> Conv block."""
x = _norm_relu(input_tensor, norm=norm)
x = tf.keras.layers.Conv2D(
filters,
kernel_size,
strides=strides,
padding='same',
use_bias=False,
kernel_initializer=tf.keras.initializers.HeNormal(seed=seed),
kernel_regularizer=tf.keras.regularizers.l2(L2_WEIGHT_DECAY))(
x)
return x
def _shortcut(input_tensor, residual, norm='group', seed=0):
"""Adds a shortcut between input and the residual."""
input_shape = tf.keras.backend.int_shape(input_tensor)
residual_shape = tf.keras.backend.int_shape(residual)
if tf.keras.backend.image_data_format() == 'channels_last':
row_axis = 1
col_axis = 2
channel_axis = 3
else:
channel_axis = 1
row_axis = 2
col_axis = 3
stride_width = int(round(input_shape[row_axis] / residual_shape[row_axis]))
stride_height = int(round(input_shape[col_axis] / residual_shape[col_axis]))
equal_channels = input_shape[channel_axis] == residual_shape[channel_axis]
shortcut = input_tensor
# 1 X 1 conv if shape is different. Else identity.
if stride_width > 1 or stride_height > 1 or not equal_channels:
shortcut = tf.keras.layers.Conv2D(
filters=residual_shape[channel_axis],
kernel_size=(1, 1),
strides=(stride_width, stride_height),
padding='valid',
use_bias=False,
kernel_initializer=tf.keras.initializers.HeNormal(seed=seed),
kernel_regularizer=tf.keras.regularizers.l2(L2_WEIGHT_DECAY))(
shortcut)
if norm == 'group':
shortcut = tfa_norms.GroupNormalization(axis=channel_axis)(shortcut)
else:
shortcut = tf.keras.layers.BatchNormalization(
axis=channel_axis,
momentum=BATCH_NORM_DECAY,
epsilon=BATCH_NORM_EPSILON)(shortcut)
return tf.keras.layers.add([shortcut, residual])
def _basic_block(input_tensor,
filters,
strides=(1, 1),
avoid_norm=False,
norm='group',
seed=0):
"""Basic convolutional block for use on resnets with <= 34 layers."""
if avoid_norm:
x = tf.keras.layers.Conv2D(
filters=filters,
kernel_size=(3, 3),
strides=strides,
padding='same',
use_bias=False,
kernel_initializer=tf.keras.initializers.HeNormal(seed=seed),
kernel_regularizer=tf.keras.regularizers.l2(L2_WEIGHT_DECAY))(
input_tensor)
else:
x = _norm_relu_conv(
input_tensor,
filters=filters,
kernel_size=(3, 3),
strides=strides,
norm=norm,
seed=seed)
x = _norm_relu_conv(
x,
filters=filters,
kernel_size=(3, 3),
strides=strides,
norm=norm,
seed=seed)
return _shortcut(input_tensor, x, norm=norm, seed=seed)
def _bottleneck_block(input_tensor,
filters,
strides=(1, 1),
avoid_norm=False,
norm='group',
seed=0):
"""Bottleneck convolutional block for use on resnets with > 34 layers."""
if avoid_norm:
x = tf.keras.layers.Conv2D(
filters=filters,
kernel_size=(1, 1),
strides=strides,
padding='same',
use_bias=False,
kernel_initializer=tf.keras.initializers.HeNormal(seed=seed),
kernel_regularizer=tf.keras.regularizers.l2(L2_WEIGHT_DECAY))(
input_tensor)
else:
x = _norm_relu_conv(
input_tensor,
filters=filters,
kernel_size=(1, 1),
strides=strides,
norm=norm,
seed=seed)
x = _norm_relu_conv(
x,
filters=filters,
kernel_size=(3, 3),
strides=strides,
norm=norm,
seed=seed)
x = _norm_relu_conv(
x,
filters=filters * 4,
kernel_size=(1, 1),
strides=strides,
norm=norm,
seed=seed)
return _shortcut(input_tensor, x, norm=norm, seed=seed)
def _residual_block(input_tensor,
block_function,
filters,
num_blocks,
strides=(1, 1),
is_first_layer=False,
norm='group',
seed=0):
"""Builds a residual block with repeating bottleneck or basic blocks."""
x = input_tensor
for i in range(num_blocks):
avoid_norm = is_first_layer and i == 0
x = block_function(
x,
filters=filters,
strides=strides,
avoid_norm=avoid_norm,
norm=norm,
seed=seed)
return x
def create_resnet(input_shape,
num_classes=10,
block='bottleneck',
repetitions=None,
initial_filters=64,
initial_strides=(2, 2),
initial_kernel_size=(7, 7),
initial_pooling='max',
norm='group',
seed=0):
"""Instantiates a ResNet v2 model with Group Normalization.
Instantiates the architecture from http://arxiv.org/pdf/1603.05027v2.pdf.
  The ResNet contains stages of residual blocks. Each residual block contains
  some number of basic or bottleneck convolutional blocks, as configured by
  the `block` and `repetitions` arguments.
Args:
input_shape: A tuple of length 3 describing the number of rows, columns, and
channels of an input. Can be in channel-first or channel-last format.
num_classes: Number of output classes.
block: Whether to use a bottleneck or basic block within each stage.
repetitions: A list of integers describing the number of blocks within each
stage. If None, defaults to the resnet50 repetitions of [3, 4, 6, 3].
initial_filters: The number of filters in the initial conv layer.
initial_strides: The strides in the initial conv layer.
initial_kernel_size: The kernel size for the initial conv layer.
initial_pooling: The type of pooling after the initial conv layer.
norm: Type of normalization to be used. Can be 'group' or 'batch'.
seed: A random seed governing the model initialization and layer randomness.
If set to `None`, No random seed is used.
Returns:
A `tf.keras.Model`.
Raises:
Exception: Input shape should be a tuple of length 3.
"""
if len(input_shape) != 3:
    raise Exception('Input shape should be a tuple of length 3.')
if repetitions is None:
repetitions = [3, 4, 6, 3]
  if block == 'basic':
    block_fn = _basic_block
  elif block == 'bottleneck':
    block_fn = _bottleneck_block
  else:
    raise ValueError('`block` must be either "basic" or "bottleneck".')
img_input = tf.keras.layers.Input(shape=input_shape)
x = _conv_norm_relu(
img_input,
filters=initial_filters,
kernel_size=initial_kernel_size,
strides=initial_strides,
norm=norm,
seed=seed)
if initial_pooling == 'max':
x = tf.keras.layers.MaxPooling2D(
pool_size=(3, 3), strides=initial_strides, padding='same')(x)
filters = initial_filters
for i, r in enumerate(repetitions):
x = _residual_block(
x,
block_fn,
filters=filters,
num_blocks=r,
is_first_layer=(i == 0),
norm=norm,
seed=seed)
filters *= 2
# Final activation in the residual blocks
x = _norm_relu(x, norm=norm)
# Classification block
x = tf.keras.layers.GlobalAveragePooling2D()(x)
x = tf.keras.layers.Dense(
num_classes,
activation='softmax',
kernel_initializer=tf.keras.initializers.RandomNormal(
stddev=0.01, seed=seed),
kernel_regularizer=tf.keras.regularizers.l2(L2_WEIGHT_DECAY),
bias_regularizer=tf.keras.regularizers.l2(L2_WEIGHT_DECAY))(
x)
model = tf.keras.models.Model(img_input, x)
return model
def create_resnet18(input_shape, num_classes, norm='group', seed=0):
"""ResNet with 18 layers and basic residual blocks."""
return create_resnet(
input_shape,
num_classes,
'basic',
repetitions=[2, 2, 2, 2],
norm=norm,
seed=seed)
def create_resnet34(input_shape, num_classes, norm='group', seed=0):
"""ResNet with 34 layers and basic residual blocks."""
return create_resnet(
input_shape,
num_classes,
'basic',
repetitions=[3, 4, 6, 3],
norm=norm,
seed=seed)
def create_resnet50(input_shape, num_classes, norm='group', seed=0):
"""ResNet with 50 layers and bottleneck residual blocks."""
return create_resnet(
input_shape,
num_classes,
'bottleneck',
repetitions=[3, 4, 6, 3],
norm=norm,
seed=seed)
def create_resnet101(input_shape, num_classes, norm='group', seed=0):
"""ResNet with 101 layers and bottleneck residual blocks."""
return create_resnet(
input_shape,
num_classes,
'bottleneck',
repetitions=[3, 4, 23, 3],
norm=norm,
seed=seed)
def create_resnet152(input_shape, num_classes, norm='group', seed=0):
"""ResNet with 152 layers and bottleneck residual blocks."""
return create_resnet(
input_shape,
num_classes,
'bottleneck',
repetitions=[3, 8, 36, 3],
norm=norm,
seed=seed)
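# Minimal usage sketch (illustrative; assumes TensorFlow and
# tensorflow-addons are installed):
#   model = create_resnet18(input_shape=(32, 32, 3), num_classes=10)
#   model.summary()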
| apache-2.0 |
ptoraskar/scrapy | scrapy/extensions/debug.py | 152 | 1890 | """
Extensions for debugging Scrapy
See documentation in docs/topics/extensions.rst
"""
import sys
import signal
import logging
import traceback
import threading
from pdb import Pdb
from scrapy.utils.engine import format_engine_status
from scrapy.utils.trackref import format_live_refs
logger = logging.getLogger(__name__)
class StackTraceDump(object):
def __init__(self, crawler=None):
self.crawler = crawler
try:
signal.signal(signal.SIGUSR2, self.dump_stacktrace)
signal.signal(signal.SIGQUIT, self.dump_stacktrace)
except AttributeError:
# win32 platforms don't support SIGUSR signals
pass
@classmethod
def from_crawler(cls, crawler):
return cls(crawler)
def dump_stacktrace(self, signum, frame):
log_args = {
'stackdumps': self._thread_stacks(),
'enginestatus': format_engine_status(self.crawler.engine),
'liverefs': format_live_refs(),
}
logger.info("Dumping stack trace and engine status\n"
"%(enginestatus)s\n%(liverefs)s\n%(stackdumps)s",
log_args, extra={'crawler': self.crawler})
def _thread_stacks(self):
id2name = dict((th.ident, th.name) for th in threading.enumerate())
dumps = ''
for id_, frame in sys._current_frames().items():
name = id2name.get(id_, '')
dump = ''.join(traceback.format_stack(frame))
dumps += "# Thread: {0}({1})\n{2}\n".format(name, id_, dump)
return dumps
class Debugger(object):
def __init__(self):
try:
signal.signal(signal.SIGUSR2, self._enter_debugger)
except AttributeError:
# win32 platforms don't support SIGUSR signals
pass
def _enter_debugger(self, signum, frame):
Pdb().set_trace(frame.f_back)
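# Illustrative trigger from a shell on POSIX platforms (assumes you know the
# pid of the running Scrapy process; whichever handler was registered last
# for SIGUSR2 wins):
#   kill -USR2 <scrapy_pid>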
| bsd-3-clause |
crafty78/ansible | lib/ansible/plugins/callback/dense.py | 14 | 17424 | # (c) 2016, Dag Wieers <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.plugins.callback.default import CallbackModule as CallbackModule_default
from ansible.utils.color import colorize, hostcolor
HAS_OD = False
try:
from collections import OrderedDict
HAS_OD = True
except ImportError:
pass
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
import sys
# Design goals:
#
# + On screen there should only be relevant stuff
# - How far are we ? (during run, last line)
# - What issues occurred
# - What changes occurred
# - Diff output (in diff-mode)
#
# + If verbosity increases, act as default output
# So that users can easily switch to default for troubleshooting
#
# + Rewrite the output during processing
# - We use the cursor to indicate where in the task we are.
# Output after the prompt is the output of the previous task.
# - If we would clear the line at the start of a task, there would often
# be no information at all, so we leave it until it gets updated
#
# + Use the same color-conventions of Ansible
#
# + Ensure the verbose output (-v) is also dense.
# Remove information that is not essential (eg. timestamps, status)
# TODO:
#
# + Properly test for terminal capabilities, and fall back to default
# + Modify Ansible mechanism so we don't need to use sys.stdout directly
# + Find an elegant solution for progress bar line wrapping
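# An illustrative way to select this plugin for a run (assumes the module is
# on Ansible's callback plugin path):
#   ANSIBLE_STDOUT_CALLBACK=dense ansible-playbook site.yml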
# FIXME: Importing constants as C simply does not work, beats me :-/
#from ansible import constants as C
class C:
COLOR_HIGHLIGHT = 'white'
COLOR_VERBOSE = 'blue'
COLOR_WARN = 'bright purple'
COLOR_ERROR = 'red'
COLOR_DEBUG = 'dark gray'
COLOR_DEPRECATE = 'purple'
COLOR_SKIP = 'cyan'
COLOR_UNREACHABLE = 'bright red'
COLOR_OK = 'green'
COLOR_CHANGED = 'yellow'
# Taken from Dstat
class vt100:
black = '\033[0;30m'
darkred = '\033[0;31m'
darkgreen = '\033[0;32m'
darkyellow = '\033[0;33m'
darkblue = '\033[0;34m'
darkmagenta = '\033[0;35m'
darkcyan = '\033[0;36m'
gray = '\033[0;37m'
darkgray = '\033[1;30m'
red = '\033[1;31m'
green = '\033[1;32m'
yellow = '\033[1;33m'
blue = '\033[1;34m'
magenta = '\033[1;35m'
cyan = '\033[1;36m'
white = '\033[1;37m'
blackbg = '\033[40m'
redbg = '\033[41m'
greenbg = '\033[42m'
yellowbg = '\033[43m'
bluebg = '\033[44m'
magentabg = '\033[45m'
cyanbg = '\033[46m'
whitebg = '\033[47m'
reset = '\033[0;0m'
bold = '\033[1m'
reverse = '\033[2m'
underline = '\033[4m'
clear = '\033[2J'
# clearline = '\033[K'
clearline = '\033[2K'
save = '\033[s'
restore = '\033[u'
save_all = '\0337'
restore_all = '\0338'
linewrap = '\033[7h'
nolinewrap = '\033[7l'
up = '\033[1A'
down = '\033[1B'
right = '\033[1C'
left = '\033[1D'
colors = dict(
ok = vt100.darkgreen,
changed = vt100.darkyellow,
skipped = vt100.darkcyan,
ignored = vt100.cyanbg + vt100.red,
failed = vt100.darkred,
unreachable = vt100.red,
)
states = ( 'skipped', 'ok', 'changed', 'failed', 'unreachable' )
class CallbackModule_dense(CallbackModule_default):
'''
This is the dense callback interface, where screen estate is still valued.
'''
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'stdout'
CALLBACK_NAME = 'dense'
def __init__(self):
# From CallbackModule
self._display = display
if HAS_OD:
self.disabled = False
self.super_ref = super(CallbackModule, self)
self.super_ref.__init__()
# Attributes to remove from results for more density
self.removed_attributes = (
# 'changed',
'delta',
# 'diff',
'end',
'failed',
'failed_when_result',
'invocation',
'start',
'stdout_lines',
)
# Initiate data structures
self.hosts = OrderedDict()
self.keep = False
self.shown_title = False
self.count = dict(play=0, handler=0, task=0)
self.type = 'foo'
# Start immediately on the first line
sys.stdout.write(vt100.reset + vt100.save + vt100.clearline)
sys.stdout.flush()
else:
display.warning("The 'dense' callback plugin requires OrderedDict which is not available in this version of python, disabling.")
self.disabled = True
def __del__(self):
sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline)
def _add_host(self, result, status):
name = result._host.get_name()
# Add a new status in case a failed task is ignored
if status == 'failed' and result._task.ignore_errors:
status = 'ignored'
# Check if we have to update an existing state (when looping over items)
if name not in self.hosts:
self.hosts[name] = dict(state=status)
elif states.index(self.hosts[name]['state']) < states.index(status):
self.hosts[name]['state'] = status
# Store delegated hostname, if needed
delegated_vars = result._result.get('_ansible_delegated_vars', None)
if delegated_vars:
self.hosts[name]['delegate'] = delegated_vars['ansible_host']
# Print progress bar
self._display_progress(result)
# # Ensure that tasks with changes/failures stay on-screen, and during diff-mode
        # if status in ['changed', 'failed', 'unreachable'] or (result.get('_diff_mode', False) and result._result.get('diff', False)):
# Ensure that tasks with changes/failures stay on-screen
if status in ['changed', 'failed', 'unreachable']:
self.keep = True
if self._display.verbosity == 1:
# Print task title, if needed
self._display_task_banner()
self._display_results(result, status)
def _clean_results(self, result):
        # Remove non-essential attributes
for attr in self.removed_attributes:
if attr in result:
del(result[attr])
# Remove empty attributes (list, dict, str)
for attr in result.copy():
if type(result[attr]) in (list, dict, basestring, unicode):
if not result[attr]:
del(result[attr])
def _handle_exceptions(self, result):
if 'exception' in result:
# Remove the exception from the result so it's not shown every time
del result['exception']
if self._display.verbosity == 1:
return "An exception occurred during task execution. To see the full traceback, use -vvv."
def _display_progress(self, result=None):
# Always rewrite the complete line
sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline + vt100.nolinewrap + vt100.underline)
sys.stdout.write('%s %d:' % (self.type, self.count[self.type]))
sys.stdout.write(vt100.reset)
sys.stdout.flush()
# Print out each host in its own status-color
for name in self.hosts:
sys.stdout.write(' ')
if self.hosts[name].get('delegate', None):
sys.stdout.write(self.hosts[name]['delegate'] + '>')
sys.stdout.write(colors[self.hosts[name]['state']] + name + vt100.reset)
sys.stdout.flush()
# if result._result.get('diff', False):
# sys.stdout.write('\n' + vt100.linewrap)
sys.stdout.write(vt100.linewrap)
# self.keep = True
def _display_task_banner(self):
if not self.shown_title:
self.shown_title = True
sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline + vt100.underline)
sys.stdout.write('%s %d: %s' % (self.type, self.count[self.type], self.task.get_name().strip()))
sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline)
sys.stdout.flush()
else:
sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline)
self.keep = False
def _display_results(self, result, status):
# Leave the previous task on screen (as it has changes/errors)
if self._display.verbosity == 0 and self.keep:
sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline)
else:
sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline)
self.keep = False
self._clean_results(result._result)
dump = ''
if result._task.action == 'include':
return
elif status == 'ok':
return
elif status == 'ignored':
dump = self._handle_exceptions(result._result)
elif status == 'failed':
dump = self._handle_exceptions(result._result)
elif status == 'unreachable':
dump = result._result['msg']
if not dump:
dump = self._dump_results(result._result)
if result._task.loop and 'results' in result._result:
self._process_items(result)
else:
sys.stdout.write(colors[status] + status + ': ')
delegated_vars = result._result.get('_ansible_delegated_vars', None)
if delegated_vars:
sys.stdout.write(vt100.reset + result._host.get_name() + '>' + colors[status] + delegated_vars['ansible_host'])
else:
sys.stdout.write(result._host.get_name())
sys.stdout.write(': ' + dump + '\n')
sys.stdout.write(vt100.reset + vt100.save + vt100.clearline)
sys.stdout.flush()
if status == 'changed':
self._handle_warnings(result._result)
def v2_playbook_on_play_start(self, play):
# Leave the previous task on screen (as it has changes/errors)
if self._display.verbosity == 0 and self.keep:
sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline + vt100.bold)
else:
sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline + vt100.bold)
# Reset at the start of each play
self.keep = False
self.count.update(dict(handler=0, task=0))
self.count['play'] += 1
self.play = play
# Write the next play on screen IN UPPERCASE, and make it permanent
name = play.get_name().strip()
if not name:
name = 'unnamed'
sys.stdout.write('PLAY %d: %s' % (self.count['play'], name.upper()))
sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline)
sys.stdout.flush()
def v2_playbook_on_task_start(self, task, is_conditional):
# Leave the previous task on screen (as it has changes/errors)
if self._display.verbosity == 0 and self.keep:
sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline + vt100.underline)
else:
# Do not clear line, since we want to retain the previous output
sys.stdout.write(vt100.restore + vt100.reset + vt100.underline)
# Reset at the start of each task
self.keep = False
self.shown_title = False
self.hosts = OrderedDict()
self.task = task
self.type = 'task'
# Enumerate task if not setup (task names are too long for dense output)
if task.get_name() != 'setup':
self.count['task'] += 1
# Write the next task on screen (behind the prompt is the previous output)
sys.stdout.write('%s %d.' % (self.type, self.count[self.type]))
sys.stdout.write(vt100.reset)
sys.stdout.flush()
def v2_playbook_on_handler_task_start(self, task):
# Leave the previous task on screen (as it has changes/errors)
if self._display.verbosity == 0 and self.keep:
sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline + vt100.underline)
else:
sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline + vt100.underline)
# Reset at the start of each handler
self.keep = False
self.shown_title = False
self.hosts = OrderedDict()
self.task = task
self.type = 'handler'
# Enumerate handler if not setup (handler names may be too long for dense output)
if task.get_name() != 'setup':
self.count[self.type] += 1
# Write the next task on screen (behind the prompt is the previous output)
sys.stdout.write('%s %d.' % (self.type, self.count[self.type]))
sys.stdout.write(vt100.reset)
sys.stdout.flush()
def v2_playbook_on_cleanup_task_start(self, task):
# TBD
sys.stdout.write('cleanup.')
sys.stdout.flush()
def v2_runner_on_failed(self, result, ignore_errors=False):
self._add_host(result, 'failed')
def v2_runner_on_ok(self, result):
if result._result.get('changed', False):
self._add_host(result, 'changed')
else:
self._add_host(result, 'ok')
def v2_runner_on_skipped(self, result):
self._add_host(result, 'skipped')
def v2_runner_on_unreachable(self, result):
self._add_host(result, 'unreachable')
def v2_runner_on_include(self, included_file):
pass
def v2_runner_on_file_diff(self, result, diff):
sys.stdout.write(vt100.bold)
self.super_ref.v2_runner_on_file_diff(result, diff)
sys.stdout.write(vt100.reset)
def v2_on_file_diff(self, result):
sys.stdout.write(vt100.bold)
self.super_ref.v2_on_file_diff(result)
sys.stdout.write(vt100.reset)
# Old definition in v2.0
def v2_playbook_item_on_ok(self, result):
self.v2_runner_item_on_ok(result)
def v2_runner_item_on_ok(self, result):
if result._result.get('changed', False):
self._add_host(result, 'changed')
else:
self._add_host(result, 'ok')
# Old definition in v2.0
def v2_playbook_item_on_failed(self, result):
self.v2_runner_item_on_failed(result)
def v2_runner_item_on_failed(self, result):
self._add_host(result, 'failed')
# Old definition in v2.0
def v2_playbook_item_on_skipped(self, result):
self.v2_runner_item_on_skipped(result)
def v2_runner_item_on_skipped(self, result):
self._add_host(result, 'skipped')
def v2_playbook_on_no_hosts_remaining(self):
if self._display.verbosity == 0 and self.keep:
sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline)
else:
sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline)
self.keep = False
sys.stdout.write(vt100.white + vt100.redbg + 'NO MORE HOSTS LEFT')
sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline)
sys.stdout.flush()
def v2_playbook_on_include(self, included_file):
pass
def v2_playbook_on_stats(self, stats):
if self._display.verbosity == 0 and self.keep:
sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline)
else:
sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline)
# In normal mode screen output should be sufficient, summary is redundant
if self._display.verbosity == 0:
return
sys.stdout.write(vt100.bold + vt100.underline)
sys.stdout.write('SUMMARY')
sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline)
sys.stdout.flush()
hosts = sorted(stats.processed.keys())
for h in hosts:
t = stats.summarize(h)
self._display.display(u"%s : %s %s %s %s" % (
hostcolor(h, t),
colorize(u'ok', t['ok'], C.COLOR_OK),
colorize(u'changed', t['changed'], C.COLOR_CHANGED),
colorize(u'unreachable', t['unreachable'], C.COLOR_UNREACHABLE),
colorize(u'failed', t['failures'], C.COLOR_ERROR)),
screen_only=True
)
# When using -vv or higher, simply do the default action
if display.verbosity >= 2 or not HAS_OD:
CallbackModule = CallbackModule_default
else:
CallbackModule = CallbackModule_dense
| gpl-3.0 |
CloudServer/nova | nova/tests/unit/api/openstack/compute/test_plugins/dummy_schema.py | 94 | 1276 | # Copyright 2014 IBM
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.api.validation import parameter_types
dummy = {
'type': 'object',
'properties': {
'dummy': {
'type': 'object',
'properties': {
'val': parameter_types.name,
},
'additionalProperties': False,
},
},
'required': ['dummy'],
'additionalProperties': False,
}
dummy2 = {
'type': 'object',
'properties': {
'dummy': {
'type': 'object',
'properties': {
'val2': parameter_types.name,
},
'additionalProperties': False,
},
},
'required': ['dummy'],
'additionalProperties': False,
}
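# Illustrative (not part of the original module): a body such as
# {"dummy": {"val": "some-name"}} should satisfy the `dummy` schema above,
# while {"dummy": {"val": "x", "extra": 1}} or a body missing the "dummy"
# key is rejected via additionalProperties / required.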
| apache-2.0 |
imruahmed/microblog | flask/lib/python2.7/site-packages/openid/urinorm.py | 159 | 5230 | import re
# From appendix B of RFC 3986 (http://www.ietf.org/rfc/rfc3986.txt)
uri_pattern = r'^(([^:/?#]+):)?(//([^/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))?'
uri_re = re.compile(uri_pattern)
# gen-delims = ":" / "/" / "?" / "#" / "[" / "]" / "@"
#
# sub-delims = "!" / "$" / "&" / "'" / "(" / ")"
# / "*" / "+" / "," / ";" / "="
#
# unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~"
uri_illegal_char_re = re.compile(
"[^-A-Za-z0-9:/?#[\]@!$&'()*+,;=._~%]", re.UNICODE)
authority_pattern = r'^([^@]*@)?([^:]*)(:.*)?'
authority_re = re.compile(authority_pattern)
pct_encoded_pattern = r'%([0-9A-Fa-f]{2})'
pct_encoded_re = re.compile(pct_encoded_pattern)
try:
unichr(0x10000)
except ValueError:
# narrow python build
UCSCHAR = [
(0xA0, 0xD7FF),
(0xF900, 0xFDCF),
(0xFDF0, 0xFFEF),
]
IPRIVATE = [
(0xE000, 0xF8FF),
]
else:
UCSCHAR = [
(0xA0, 0xD7FF),
(0xF900, 0xFDCF),
(0xFDF0, 0xFFEF),
(0x10000, 0x1FFFD),
(0x20000, 0x2FFFD),
(0x30000, 0x3FFFD),
(0x40000, 0x4FFFD),
(0x50000, 0x5FFFD),
(0x60000, 0x6FFFD),
(0x70000, 0x7FFFD),
(0x80000, 0x8FFFD),
(0x90000, 0x9FFFD),
(0xA0000, 0xAFFFD),
(0xB0000, 0xBFFFD),
(0xC0000, 0xCFFFD),
(0xD0000, 0xDFFFD),
(0xE1000, 0xEFFFD),
]
IPRIVATE = [
(0xE000, 0xF8FF),
(0xF0000, 0xFFFFD),
(0x100000, 0x10FFFD),
]
_unreserved = [False] * 256
for _ in range(ord('A'), ord('Z') + 1): _unreserved[_] = True
for _ in range(ord('0'), ord('9') + 1): _unreserved[_] = True
for _ in range(ord('a'), ord('z') + 1): _unreserved[_] = True
_unreserved[ord('-')] = True
_unreserved[ord('.')] = True
_unreserved[ord('_')] = True
_unreserved[ord('~')] = True
_escapeme_re = re.compile('[%s]' % (''.join(
map(lambda (m, n): u'%s-%s' % (unichr(m), unichr(n)),
UCSCHAR + IPRIVATE)),))
def _pct_escape_unicode(char_match):
c = char_match.group()
return ''.join(['%%%X' % (ord(octet),) for octet in c.encode('utf-8')])
def _pct_encoded_replace_unreserved(mo):
try:
i = int(mo.group(1), 16)
if _unreserved[i]:
return chr(i)
else:
return mo.group().upper()
except ValueError:
return mo.group()
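# For example, '%7E' is decoded back to '~' (unreserved), while '%2f'
# stays percent-encoded but is normalised to upper case as '%2F'.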
def _pct_encoded_replace(mo):
try:
return chr(int(mo.group(1), 16))
except ValueError:
return mo.group()
def remove_dot_segments(path):
result_segments = []
while path:
if path.startswith('../'):
path = path[3:]
elif path.startswith('./'):
path = path[2:]
elif path.startswith('/./'):
path = path[2:]
elif path == '/.':
path = '/'
elif path.startswith('/../'):
path = path[3:]
if result_segments:
result_segments.pop()
elif path == '/..':
path = '/'
if result_segments:
result_segments.pop()
elif path == '..' or path == '.':
path = ''
else:
i = 0
if path[0] == '/':
i = 1
i = path.find('/', i)
if i == -1:
i = len(path)
result_segments.append(path[:i])
path = path[i:]
return ''.join(result_segments)
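# Illustrative behaviour, mirroring RFC 3986 section 5.2.4:
# remove_dot_segments('/a/b/c/./../../g') -> '/a/g'
# remove_dot_segments('mid/content=5/../6') -> 'mid/6'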
def urinorm(uri):
if isinstance(uri, unicode):
uri = _escapeme_re.sub(_pct_escape_unicode, uri).encode('ascii')
illegal_mo = uri_illegal_char_re.search(uri)
if illegal_mo:
raise ValueError('Illegal characters in URI: %r at position %s' %
(illegal_mo.group(), illegal_mo.start()))
uri_mo = uri_re.match(uri)
scheme = uri_mo.group(2)
if scheme is None:
raise ValueError('No scheme specified')
scheme = scheme.lower()
if scheme not in ('http', 'https'):
raise ValueError('Not an absolute HTTP or HTTPS URI: %r' % (uri,))
authority = uri_mo.group(4)
if authority is None:
raise ValueError('Not an absolute URI: %r' % (uri,))
authority_mo = authority_re.match(authority)
if authority_mo is None:
raise ValueError('URI does not have a valid authority: %r' % (uri,))
userinfo, host, port = authority_mo.groups()
if userinfo is None:
userinfo = ''
if '%' in host:
host = host.lower()
host = pct_encoded_re.sub(_pct_encoded_replace, host)
host = unicode(host, 'utf-8').encode('idna')
else:
host = host.lower()
if port:
if (port == ':' or
(scheme == 'http' and port == ':80') or
(scheme == 'https' and port == ':443')):
port = ''
else:
port = ''
authority = userinfo + host + port
path = uri_mo.group(5)
path = pct_encoded_re.sub(_pct_encoded_replace_unreserved, path)
path = remove_dot_segments(path)
if not path:
path = '/'
query = uri_mo.group(6)
if query is None:
query = ''
fragment = uri_mo.group(8)
if fragment is None:
fragment = ''
return scheme + '://' + authority + path + query + fragment
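# Example of the combined normalisation (a Python 2 runtime is assumed,
# since the module relies on unicode/unichr):
# urinorm('HTTP://www.Example.COM:80/a/../b') -> 'http://www.example.com/b'
# urinorm('example.com/') raises ValueError because no scheme is present.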
| bsd-3-clause |
astrorafael/ema | ema/dev/todtimer.py | 1 | 7813 | # ----------------------------------------------------------------------
# Copyright (c) 2014 Rafael Gonzalez.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# ----------------------------------------------------------------------
import logging
import datetime
import subprocess
from ema.server import Server, Alarmable2
from ema.device import Device
from ema.intervals import Interval, Intervals
log = logging.getLogger('todtimer')
# =====================================
# Utility functions for Aux Relay Class
# =====================================
def now():
return datetime.datetime.utcnow().replace(microsecond=0).time()
def adjust(time, minutes):
''' adjust a datetime.time object by some integer minutes,
returning a new datetime.time object'''
today = datetime.date.today()
tsnow = datetime.datetime.combine(today, time)
dur = datetime.timedelta(minutes=minutes)
return (tsnow + dur).time()
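# Quick sanity check (standard datetime semantics): adjusting across
# midnight wraps within the day, e.g.
# adjust(datetime.time(23, 50), minutes=20) -> datetime.time(0, 10)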
def durationFromNow(time):
'''Retuns a datetime.timedelta object from given time to now'''
today = datetime.date.today()
tsnow = datetime.datetime.utcnow()
tstime = datetime.datetime.combine(today, time)
if tstime < tsnow:
tstime += datetime.timedelta(hours=24)
return tstime - tsnow
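# E.g. if it is 10:00 UTC now, durationFromNow(datetime.time(9, 0)) is
# roughly 23 hours, since the next 09:00 occurrence falls tomorrow.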
# =======================
# Time of Day Timer Class
# =======================
class Timer(Device, Alarmable2):
# Minimun active interval size in minutes
MIN_DUR = 15
INTERVAL = "interval"
INTERVALS = "Timer Active Intervals"
ACTIVE = "active"
INACTIVE = "inactive"
def __init__(self, ema, parser):
lvl = parser.get("TOD_TIMER", "tod_log")
log.setLevel(lvl)
publish_where = parser.get("TOD_TIMER","tod_publish_where").split(',')
publish_what = parser.get("TOD_TIMER","tod_publish_what").split(',')
intervals = parser.get("TOD_TIMER","tod_intervals")
poweroff = parser.getboolean("TOD_TIMER","tod_poweroff")
Device.__init__(self, publish_where, publish_what)
Alarmable2.__init__(self)
self.ema = ema
self.poweroff = poweroff
self.windows = Intervals.parse(intervals, Timer.MIN_DUR)
self.gaps = ~ self.windows
self.where = None
self.i = None
self.subscribedList = []
ema.addParameter(self)
ema.addCurrent(self)
ema.addAverage(self)
log.debug("processed %d active intervals and %d inactive intervals", len(self.windows), len(self.gaps))
# Better in EMA, so the subscribers subscribe first before
# running a time window search process
## self.onNewInterval()
# --------------------------------
# Offer the subscription Interface
# --------------------------------
def addSubscriber(self, obj):
'''Adds an object implementing the following methods:
onNewInterval()
'''
callable(getattr(obj,'onNewInterval'))
self.subscribedList.append(obj)
def delSubscriber(self, obj):
'''Removes subscribed object from the list.'''
self.subscribedList.pop(self.subscribedList.index(obj))
# ----------------------------------
# Implements the Alarmable interface
# ----------------------------------
def onTimeoutDo(self):
self.onNewInterval()
# ----------
# Properties
# ----------
@property
def current(self):
'''Return dictionary with current measured values'''
i = self.i
if self.where == Timer.ACTIVE:
return { Timer.INTERVAL: ( "%s %s" % (self.where, self.windows[i]) , 'UTC') }
else:
return { Timer.INTERVAL: ( "%s %s" % (self.where, self.gaps[i]) , 'UTC') }
@property
def average(self):
'''Return dictionary averaged values over a period of N samples'''
return { Timer.INTERVAL : ("N/A" , '') }
@property
def parameter(self):
'''Return dictionary with calibration constants'''
return {
Timer.INTERVALS : ( str(self.windows) , 'UTC') ,
}
# ------------------
# Intervals handling
# ------------------
def nextActiveIndex(self, i):
return (i + 1) % len(self.windows)
def getInterval(self, where, i):
if where == Timer.ACTIVE:
return self.windows[i]
else:
return self.gaps[i]
def getActiveInterval(self, i):
return self.windows[i]
def onNewInterval(self):
'''Executes the callbacks, triggered by alarms'''
self.findCurrentInterval()
for o in self.subscribedList:
o.onNewInterval(self.where, self.i)
def nextAlarm(self, tMID):
'''Program next alarm'''
t = int(durationFromNow(tMID).total_seconds())
log.info("Next check at %s, %d seconds from now",tMID.strftime("%H:%M:%S"), t)
self.setTimeout(t)
self.resetAlarm()
self.ema.addAlarmable(self)
def isShuttingDown(self):
'''Find if a shutdown process is under way'''
p1 = subprocess.Popen(["ps", "-ef"], stdout=subprocess.PIPE)
p2 = subprocess.Popen(["grep", "shutdown"], stdin=p1.stdout, stdout=subprocess.PIPE)
p3 = subprocess.Popen(["grep", "-v", "grep"], stdin=p2.stdout, stdout=subprocess.PIPE)
output = p3.communicate()[0]
if len(output) != 0:
log.debug("Previous Shutdown under way")
return True
else:
log.debug("No previous Shutdown under way")
return False
def shutdown(self, tSHU):
'''Manages a possible shutdown request'''
# WARNING !!!!! tSHU IS GIVEN AS UTC !!!!!!
# AND SHUTDOWN REQUIRES LOCAL TIME !!!!!
# This will only work if local time is UTC as well
if self.poweroff and not self.isShuttingDown():
if tSHU > now():
tSHUstr = tSHU.strftime("%H:%M")
log.warning("Calling shutdown at %s",tSHUstr)
subprocess.Popen(['sudo','shutdown','-h', tSHUstr])
else:
log.warning("Calling shutdown now")
subprocess.Popen(['sudo','shutdown','-h', 'now'])
log.info("Programmed shutdown at %s",tSHU.strftime("%H:%M:%S"))
def findCurrentInterval(self):
'''Find the current interval'''
tNow = now()
log.debug("checking active intervals %s", self.windows)
found, i = self.windows.find(tNow)
if found:
self.where = Timer.ACTIVE
self.i = i
log.info("now (%s) we are in the active window %s", tNow.strftime("%H:%M:%S"), self.windows[i])
tSHU = adjust(self.windows[i].t1, minutes=-2)
tMID = self.gaps[i].midpoint()
else:
self.where = Timer.INACTIVE
log.debug("checking inactive intervals %s", self.gaps)
found, i = self.gaps.find(tNow)
log.info("now (%s) we are in the inactive window %s", tNow.strftime("%H:%M:%S"), self.gaps[i])
self.i = i
i = self.nextActiveIndex(i)
tSHU = adjust(self.windows[i].t1, minutes=-2)
tMID = self.windows[i].midpoint()
# In any case, set an alarm for the next self-check
self.nextAlarm(tMID)
# Programs power off time
self.shutdown(tSHU)
| mit |
andre-senna/opencog | opencog/python/dingjie/m_adaptors.py | 36 | 1735 | ##
# @file m_adaptors.py
# @brief adaptor to opencog
# @author Dingjie.Wang
# @version 1.0
# @date 2012-07-31
from opencog.atomspace import Atom, types
from types_inheritance import type_to_name
class Viz_OpenCog_Tree_Adaptor(object):
"""docstring for tree"""
def __init__(self, tree_opencog):
self._op = tree_opencog.op.name if isinstance(tree_opencog.op, Atom) else str(tree_opencog.op)
self._children = []
for child in tree_opencog.args:
self._children.append(Viz_OpenCog_Tree_Adaptor(child))
def get_op(self):
return self._op
def set_op(self, value):
'''docstring for set_op'''
self._op = value
op = property(get_op, set_op)
def get_children(self):
return self._children
children = property(get_children)
class FakeAtom(object):
"""docstring for FakeAtom"""
def __init__(self, t, name, tv = None, av = None):
self.type = t
self.name = name
self.tv = tv
self.av = av
# @@ could just use attribute method
self.type_name = type_to_name(t)
# --------------------------------------------------------------------------------------------------------------
def output_atomspace(a, filename):
'''Print every atom in atomspace a and also dump it to the given file.'''
f = None
try:
f = open(filename, 'w')
atoms = a.get_atoms_by_type(types.Atom)
for atom in atoms:
print atom
print >> f, atom
#print >> f, a.get_tv(atom.h).mean, a.get_tv(atom.h).count, a.get_tv(atom.h).confidence
except IOError, e:
print e
raise
finally:
# Guard against `f` being unbound if open() itself failed
if f is not None:
f.close()
__all__ = ["Viz_OpenCog_Tree_Adaptor", "FakeAtom" ,"output_atomspace" ]
| agpl-3.0 |
thundernet8/WRGameVideos-Server | venv/lib/python2.7/site.py | 784 | 27543 | """Append module search paths for third-party packages to sys.path.
****************************************************************
* This module is automatically imported during initialization. *
****************************************************************
In earlier versions of Python (up to 1.5a3), scripts or modules that
needed to use site-specific modules would place ``import site''
somewhere near the top of their code. Because of the automatic
import, this is no longer necessary (but code that does it still
works).
This will append site-specific paths to the module search path. On
Unix, it starts with sys.prefix and sys.exec_prefix (if different) and
appends lib/python<version>/site-packages as well as lib/site-python.
It also supports the Debian convention of
lib/python<version>/dist-packages. On other platforms (mainly Mac and
Windows), it uses just sys.prefix (and sys.exec_prefix, if different,
but this is unlikely). The resulting directories, if they exist, are
appended to sys.path, and also inspected for path configuration files.
FOR DEBIAN, this sys.path is augmented with directories in /usr/local.
Local addons go into /usr/local/lib/python<version>/site-packages
(resp. /usr/local/lib/site-python), Debian addons install into
/usr/{lib,share}/python<version>/dist-packages.
A path configuration file is a file whose name has the form
<package>.pth; its contents are additional directories (one per line)
to be added to sys.path. Non-existing directories (or
non-directories) are never added to sys.path; no directory is added to
sys.path more than once. Blank lines and lines beginning with
'#' are skipped. Lines starting with 'import' are executed.
For example, suppose sys.prefix and sys.exec_prefix are set to
/usr/local and there is a directory /usr/local/lib/python2.X/site-packages
with three subdirectories, foo, bar and spam, and two path
configuration files, foo.pth and bar.pth. Assume foo.pth contains the
following:
# foo package configuration
foo
bar
bletch
and bar.pth contains:
# bar package configuration
bar
Then the following directories are added to sys.path, in this order:
/usr/local/lib/python2.X/site-packages/bar
/usr/local/lib/python2.X/site-packages/foo
Note that bletch is omitted because it doesn't exist; bar precedes foo
because bar.pth comes alphabetically before foo.pth; and spam is
omitted because it is not mentioned in either path configuration file.
After these path manipulations, an attempt is made to import a module
named sitecustomize, which can perform arbitrary additional
site-specific customizations. If this import fails with an
ImportError exception, it is silently ignored.
"""
import sys
import os
try:
import __builtin__ as builtins
except ImportError:
import builtins
try:
set
except NameError:
from sets import Set as set
# Prefixes for site-packages; add additional prefixes like /usr/local here
PREFIXES = [sys.prefix, sys.exec_prefix]
# Enable per user site-packages directory
# set it to False to disable the feature or True to force the feature
ENABLE_USER_SITE = None
# for distutils.commands.install
USER_SITE = None
USER_BASE = None
_is_64bit = (getattr(sys, 'maxsize', None) or getattr(sys, 'maxint')) > 2**32
_is_pypy = hasattr(sys, 'pypy_version_info')
_is_jython = sys.platform[:4] == 'java'
if _is_jython:
ModuleType = type(os)
def makepath(*paths):
dir = os.path.join(*paths)
if _is_jython and (dir == '__classpath__' or
dir.startswith('__pyclasspath__')):
return dir, dir
dir = os.path.abspath(dir)
return dir, os.path.normcase(dir)
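# e.g. makepath('Lib', 'Foo') returns the absolute path plus its
# os.path.normcase() form; on Windows the second element is lower-cased,
# and known_paths stores that normalised form for de-duplication.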
def abs__file__():
"""Set all module' __file__ attribute to an absolute path"""
for m in sys.modules.values():
if ((_is_jython and not isinstance(m, ModuleType)) or
hasattr(m, '__loader__')):
# only modules need the abspath in Jython. and don't mess
# with a PEP 302-supplied __file__
continue
f = getattr(m, '__file__', None)
if f is None:
continue
m.__file__ = os.path.abspath(f)
def removeduppaths():
""" Remove duplicate entries from sys.path along with making them
absolute"""
# This ensures that the initial path provided by the interpreter contains
# only absolute pathnames, even if we're running from the build directory.
L = []
known_paths = set()
for dir in sys.path:
# Filter out duplicate paths (on case-insensitive file systems also
# if they only differ in case); turn relative paths into absolute
# paths.
dir, dircase = makepath(dir)
if not dircase in known_paths:
L.append(dir)
known_paths.add(dircase)
sys.path[:] = L
return known_paths
# XXX This should not be part of site.py, since it is needed even when
# using the -S option for Python. See http://www.python.org/sf/586680
def addbuilddir():
"""Append ./build/lib.<platform> in case we're running in the build dir
(especially for Guido :-)"""
from distutils.util import get_platform
s = "build/lib.%s-%.3s" % (get_platform(), sys.version)
if hasattr(sys, 'gettotalrefcount'):
s += '-pydebug'
s = os.path.join(os.path.dirname(sys.path[-1]), s)
sys.path.append(s)
def _init_pathinfo():
"""Return a set containing all existing directory entries from sys.path"""
d = set()
for dir in sys.path:
try:
if os.path.isdir(dir):
dir, dircase = makepath(dir)
d.add(dircase)
except TypeError:
continue
return d
def addpackage(sitedir, name, known_paths):
"""Add a new path to known_paths by combining sitedir and 'name' or execute
sitedir if it starts with 'import'"""
if known_paths is None:
_init_pathinfo()
reset = 1
else:
reset = 0
fullname = os.path.join(sitedir, name)
try:
f = open(fullname, "rU")
except IOError:
return
try:
for line in f:
if line.startswith("#"):
continue
if line.startswith("import"):
exec(line)
continue
line = line.rstrip()
dir, dircase = makepath(sitedir, line)
if not dircase in known_paths and os.path.exists(dir):
sys.path.append(dir)
known_paths.add(dircase)
finally:
f.close()
if reset:
known_paths = None
return known_paths
def addsitedir(sitedir, known_paths=None):
"""Add 'sitedir' argument to sys.path if missing and handle .pth files in
'sitedir'"""
if known_paths is None:
known_paths = _init_pathinfo()
reset = 1
else:
reset = 0
sitedir, sitedircase = makepath(sitedir)
if not sitedircase in known_paths:
sys.path.append(sitedir) # Add path component
try:
names = os.listdir(sitedir)
except os.error:
return
names.sort()
for name in names:
if name.endswith(os.extsep + "pth"):
addpackage(sitedir, name, known_paths)
if reset:
known_paths = None
return known_paths
def addsitepackages(known_paths, sys_prefix=sys.prefix, exec_prefix=sys.exec_prefix):
"""Add site-packages (and possibly site-python) to sys.path"""
prefixes = [os.path.join(sys_prefix, "local"), sys_prefix]
if exec_prefix != sys_prefix:
prefixes.append(os.path.join(exec_prefix, "local"))
for prefix in prefixes:
if prefix:
if sys.platform in ('os2emx', 'riscos') or _is_jython:
sitedirs = [os.path.join(prefix, "Lib", "site-packages")]
elif _is_pypy:
sitedirs = [os.path.join(prefix, 'site-packages')]
elif sys.platform == 'darwin' and prefix == sys_prefix:
if prefix.startswith("/System/Library/Frameworks/"): # Apple's Python
sitedirs = [os.path.join("/Library/Python", sys.version[:3], "site-packages"),
os.path.join(prefix, "Extras", "lib", "python")]
else: # any other Python distros on OSX work this way
sitedirs = [os.path.join(prefix, "lib",
"python" + sys.version[:3], "site-packages")]
elif os.sep == '/':
sitedirs = [os.path.join(prefix,
"lib",
"python" + sys.version[:3],
"site-packages"),
os.path.join(prefix, "lib", "site-python"),
os.path.join(prefix, "python" + sys.version[:3], "lib-dynload")]
lib64_dir = os.path.join(prefix, "lib64", "python" + sys.version[:3], "site-packages")
if (os.path.exists(lib64_dir) and
os.path.realpath(lib64_dir) not in [os.path.realpath(p) for p in sitedirs]):
if _is_64bit:
sitedirs.insert(0, lib64_dir)
else:
sitedirs.append(lib64_dir)
try:
# sys.getobjects only available in --with-pydebug build
sys.getobjects
sitedirs.insert(0, os.path.join(sitedirs[0], 'debug'))
except AttributeError:
pass
# Debian-specific dist-packages directories:
sitedirs.append(os.path.join(prefix, "local/lib",
"python" + sys.version[:3],
"dist-packages"))
if sys.version[0] == '2':
sitedirs.append(os.path.join(prefix, "lib",
"python" + sys.version[:3],
"dist-packages"))
else:
sitedirs.append(os.path.join(prefix, "lib",
"python" + sys.version[0],
"dist-packages"))
sitedirs.append(os.path.join(prefix, "lib", "dist-python"))
else:
sitedirs = [prefix, os.path.join(prefix, "lib", "site-packages")]
if sys.platform == 'darwin':
# for framework builds *only* we add the standard Apple
# locations. Currently only per-user, but /Library and
# /Network/Library could be added too
if 'Python.framework' in prefix:
home = os.environ.get('HOME')
if home:
sitedirs.append(
os.path.join(home,
'Library',
'Python',
sys.version[:3],
'site-packages'))
for sitedir in sitedirs:
if os.path.isdir(sitedir):
addsitedir(sitedir, known_paths)
return None
def check_enableusersite():
"""Check if user site directory is safe for inclusion
The function tests for the command line flag (including environment var),
process uid/gid equal to effective uid/gid.
None: Disabled for security reasons
False: Disabled by user (command line option)
True: Safe and enabled
"""
if hasattr(sys, 'flags') and getattr(sys.flags, 'no_user_site', False):
return False
if hasattr(os, "getuid") and hasattr(os, "geteuid"):
# check process uid == effective uid
if os.geteuid() != os.getuid():
return None
if hasattr(os, "getgid") and hasattr(os, "getegid"):
# check process gid == effective gid
if os.getegid() != os.getgid():
return None
return True
def addusersitepackages(known_paths):
"""Add a per user site-package to sys.path
Each user has its own python directory with site-packages in the
home directory.
USER_BASE is the root directory for all Python versions
USER_SITE is the user specific site-packages directory
USER_SITE/.. can be used for data.
"""
global USER_BASE, USER_SITE, ENABLE_USER_SITE
env_base = os.environ.get("PYTHONUSERBASE", None)
def joinuser(*args):
return os.path.expanduser(os.path.join(*args))
#if sys.platform in ('os2emx', 'riscos'):
# # Don't know what to put here
# USER_BASE = ''
# USER_SITE = ''
if os.name == "nt":
base = os.environ.get("APPDATA") or "~"
if env_base:
USER_BASE = env_base
else:
USER_BASE = joinuser(base, "Python")
USER_SITE = os.path.join(USER_BASE,
"Python" + sys.version[0] + sys.version[2],
"site-packages")
else:
if env_base:
USER_BASE = env_base
else:
USER_BASE = joinuser("~", ".local")
USER_SITE = os.path.join(USER_BASE, "lib",
"python" + sys.version[:3],
"site-packages")
if ENABLE_USER_SITE and os.path.isdir(USER_SITE):
addsitedir(USER_SITE, known_paths)
if ENABLE_USER_SITE:
for dist_libdir in ("lib", "local/lib"):
user_site = os.path.join(USER_BASE, dist_libdir,
"python" + sys.version[:3],
"dist-packages")
if os.path.isdir(user_site):
addsitedir(user_site, known_paths)
return known_paths
def setBEGINLIBPATH():
"""The OS/2 EMX port has optional extension modules that do double duty
as DLLs (and must use the .DLL file extension) for other extensions.
The library search path needs to be amended so these will be found
during module import. Use BEGINLIBPATH so that these are at the start
of the library search path.
"""
dllpath = os.path.join(sys.prefix, "Lib", "lib-dynload")
libpath = os.environ['BEGINLIBPATH'].split(';')
if libpath[-1]:
libpath.append(dllpath)
else:
libpath[-1] = dllpath
os.environ['BEGINLIBPATH'] = ';'.join(libpath)
def setquit():
"""Define new built-ins 'quit' and 'exit'.
These are simply strings that display a hint on how to exit.
"""
if os.sep == ':':
eof = 'Cmd-Q'
elif os.sep == '\\':
eof = 'Ctrl-Z plus Return'
else:
eof = 'Ctrl-D (i.e. EOF)'
class Quitter(object):
def __init__(self, name):
self.name = name
def __repr__(self):
return 'Use %s() or %s to exit' % (self.name, eof)
def __call__(self, code=None):
# Shells like IDLE catch the SystemExit, but listen when their
# stdin wrapper is closed.
try:
sys.stdin.close()
except:
pass
raise SystemExit(code)
builtins.quit = Quitter('quit')
builtins.exit = Quitter('exit')
class _Printer(object):
"""interactive prompt objects for printing the license text, a list of
contributors and the copyright notice."""
MAXLINES = 23
def __init__(self, name, data, files=(), dirs=()):
self.__name = name
self.__data = data
self.__files = files
self.__dirs = dirs
self.__lines = None
def __setup(self):
if self.__lines:
return
data = None
for dir in self.__dirs:
for filename in self.__files:
filename = os.path.join(dir, filename)
try:
fp = open(filename, "rU")
data = fp.read()
fp.close()
break
except IOError:
pass
if data:
break
if not data:
data = self.__data
self.__lines = data.split('\n')
self.__linecnt = len(self.__lines)
def __repr__(self):
self.__setup()
if len(self.__lines) <= self.MAXLINES:
return "\n".join(self.__lines)
else:
return "Type %s() to see the full %s text" % ((self.__name,)*2)
def __call__(self):
self.__setup()
prompt = 'Hit Return for more, or q (and Return) to quit: '
lineno = 0
while 1:
try:
for i in range(lineno, lineno + self.MAXLINES):
print(self.__lines[i])
except IndexError:
break
else:
lineno += self.MAXLINES
key = None
while key is None:
try:
key = raw_input(prompt)
except NameError:
key = input(prompt)
if key not in ('', 'q'):
key = None
if key == 'q':
break
def setcopyright():
"""Set 'copyright' and 'credits' in __builtin__"""
builtins.copyright = _Printer("copyright", sys.copyright)
if _is_jython:
builtins.credits = _Printer(
"credits",
"Jython is maintained by the Jython developers (www.jython.org).")
elif _is_pypy:
builtins.credits = _Printer(
"credits",
"PyPy is maintained by the PyPy developers: http://pypy.org/")
else:
builtins.credits = _Printer("credits", """\
Thanks to CWI, CNRI, BeOpen.com, Zope Corporation and a cast of thousands
for supporting Python development. See www.python.org for more information.""")
here = os.path.dirname(os.__file__)
builtins.license = _Printer(
"license", "See http://www.python.org/%.3s/license.html" % sys.version,
["LICENSE.txt", "LICENSE"],
[os.path.join(here, os.pardir), here, os.curdir])
class _Helper(object):
"""Define the built-in 'help'.
This is a wrapper around pydoc.help (with a twist).
"""
def __repr__(self):
return "Type help() for interactive help, " \
"or help(object) for help about object."
def __call__(self, *args, **kwds):
import pydoc
return pydoc.help(*args, **kwds)
def sethelper():
builtins.help = _Helper()
def aliasmbcs():
"""On Windows, some default encodings are not provided by Python,
while they are always available as "mbcs" in each locale. Make
them usable by aliasing to "mbcs" in such a case."""
if sys.platform == 'win32':
import locale, codecs
enc = locale.getdefaultlocale()[1]
if enc.startswith('cp'): # "cp***" ?
try:
codecs.lookup(enc)
except LookupError:
import encodings
encodings._cache[enc] = encodings._unknown
encodings.aliases.aliases[enc] = 'mbcs'
def setencoding():
"""Set the string encoding used by the Unicode implementation. The
default is 'ascii', but if you're willing to experiment, you can
change this."""
encoding = "ascii" # Default value set by _PyUnicode_Init()
if 0:
# Enable to support locale aware default string encodings.
import locale
loc = locale.getdefaultlocale()
if loc[1]:
encoding = loc[1]
if 0:
# Enable to switch off string to Unicode coercion and implicit
# Unicode to string conversion.
encoding = "undefined"
if encoding != "ascii":
# On Non-Unicode builds this will raise an AttributeError...
sys.setdefaultencoding(encoding) # Needs Python Unicode build !
def execsitecustomize():
"""Run custom site specific code, if available."""
try:
import sitecustomize
except ImportError:
pass
def virtual_install_main_packages():
f = open(os.path.join(os.path.dirname(__file__), 'orig-prefix.txt'))
sys.real_prefix = f.read().strip()
f.close()
pos = 2
hardcoded_relative_dirs = []
if sys.path[0] == '':
pos += 1
if _is_jython:
paths = [os.path.join(sys.real_prefix, 'Lib')]
elif _is_pypy:
if sys.version_info > (3, 2):
cpyver = '%d' % sys.version_info[0]
elif sys.pypy_version_info >= (1, 5):
cpyver = '%d.%d' % sys.version_info[:2]
else:
cpyver = '%d.%d.%d' % sys.version_info[:3]
paths = [os.path.join(sys.real_prefix, 'lib_pypy'),
os.path.join(sys.real_prefix, 'lib-python', cpyver)]
if sys.pypy_version_info < (1, 9):
paths.insert(1, os.path.join(sys.real_prefix,
'lib-python', 'modified-%s' % cpyver))
hardcoded_relative_dirs = paths[:] # for the special 'darwin' case below
#
# This is hardcoded in the Python executable, but relative to sys.prefix:
for path in paths[:]:
plat_path = os.path.join(path, 'plat-%s' % sys.platform)
if os.path.exists(plat_path):
paths.append(plat_path)
elif sys.platform == 'win32':
paths = [os.path.join(sys.real_prefix, 'Lib'), os.path.join(sys.real_prefix, 'DLLs')]
else:
paths = [os.path.join(sys.real_prefix, 'lib', 'python'+sys.version[:3])]
hardcoded_relative_dirs = paths[:] # for the special 'darwin' case below
lib64_path = os.path.join(sys.real_prefix, 'lib64', 'python'+sys.version[:3])
if os.path.exists(lib64_path):
if _is_64bit:
paths.insert(0, lib64_path)
else:
paths.append(lib64_path)
# This is hardcoded in the Python executable, but relative to
# sys.prefix. Debian change: we need to add the multiarch triplet
# here, which is where the real stuff lives. As per PEP 421, in
# Python 3.3+, this lives in sys.implementation, while in Python 2.7
# it lives in sys.
try:
arch = getattr(sys, 'implementation', sys)._multiarch
except AttributeError:
# This is a non-multiarch aware Python. Fallback to the old way.
arch = sys.platform
plat_path = os.path.join(sys.real_prefix, 'lib',
'python'+sys.version[:3],
'plat-%s' % arch)
if os.path.exists(plat_path):
paths.append(plat_path)
# This is hardcoded in the Python executable, but
# relative to sys.prefix, so we have to fix up:
for path in list(paths):
tk_dir = os.path.join(path, 'lib-tk')
if os.path.exists(tk_dir):
paths.append(tk_dir)
# These are hardcoded in the Apple's Python executable,
# but relative to sys.prefix, so we have to fix them up:
if sys.platform == 'darwin':
hardcoded_paths = [os.path.join(relative_dir, module)
for relative_dir in hardcoded_relative_dirs
for module in ('plat-darwin', 'plat-mac', 'plat-mac/lib-scriptpackages')]
for path in hardcoded_paths:
if os.path.exists(path):
paths.append(path)
sys.path.extend(paths)
def force_global_eggs_after_local_site_packages():
"""
Force easy_installed eggs in the global environment to get placed
in sys.path after all packages inside the virtualenv. This
maintains the "least surprise" result that packages in the
virtualenv always mask global packages, never the other way
around.
"""
egginsert = getattr(sys, '__egginsert', 0)
for i, path in enumerate(sys.path):
if i > egginsert and path.startswith(sys.prefix):
egginsert = i
sys.__egginsert = egginsert + 1
def virtual_addsitepackages(known_paths):
force_global_eggs_after_local_site_packages()
return addsitepackages(known_paths, sys_prefix=sys.real_prefix)
def fixclasspath():
"""Adjust the special classpath sys.path entries for Jython. These
entries should follow the base virtualenv lib directories.
"""
paths = []
classpaths = []
for path in sys.path:
if path == '__classpath__' or path.startswith('__pyclasspath__'):
classpaths.append(path)
else:
paths.append(path)
sys.path = paths
sys.path.extend(classpaths)
def execusercustomize():
"""Run custom user specific code, if available."""
try:
import usercustomize
except ImportError:
pass
def main():
global ENABLE_USER_SITE
virtual_install_main_packages()
abs__file__()
paths_in_sys = removeduppaths()
if (os.name == "posix" and sys.path and
os.path.basename(sys.path[-1]) == "Modules"):
addbuilddir()
if _is_jython:
fixclasspath()
GLOBAL_SITE_PACKAGES = not os.path.exists(os.path.join(os.path.dirname(__file__), 'no-global-site-packages.txt'))
if not GLOBAL_SITE_PACKAGES:
ENABLE_USER_SITE = False
if ENABLE_USER_SITE is None:
ENABLE_USER_SITE = check_enableusersite()
paths_in_sys = addsitepackages(paths_in_sys)
paths_in_sys = addusersitepackages(paths_in_sys)
if GLOBAL_SITE_PACKAGES:
paths_in_sys = virtual_addsitepackages(paths_in_sys)
if sys.platform == 'os2emx':
setBEGINLIBPATH()
setquit()
setcopyright()
sethelper()
aliasmbcs()
setencoding()
execsitecustomize()
if ENABLE_USER_SITE:
execusercustomize()
# Remove sys.setdefaultencoding() so that users cannot change the
# encoding after initialization. The test for presence is needed when
# this module is run as a script, because this code is executed twice.
if hasattr(sys, "setdefaultencoding"):
del sys.setdefaultencoding
main()
def _script():
help = """\
%s [--user-base] [--user-site]
Without arguments print some useful information
With arguments print the value of USER_BASE and/or USER_SITE separated
by '%s'.
Exit codes with --user-base or --user-site:
0 - user site directory is enabled
1 - user site directory is disabled by user
2 - user site directory is disabled by super user
or for security reasons
>2 - unknown error
"""
args = sys.argv[1:]
if not args:
print("sys.path = [")
for dir in sys.path:
print(" %r," % (dir,))
print("]")
def exists(path):
if os.path.isdir(path):
return "exists"
else:
return "doesn't exist"
print("USER_BASE: %r (%s)" % (USER_BASE, exists(USER_BASE)))
print("USER_SITE: %r (%s)" % (USER_SITE, exists(USER_BASE)))
print("ENABLE_USER_SITE: %r" % ENABLE_USER_SITE)
sys.exit(0)
buffer = []
if '--user-base' in args:
buffer.append(USER_BASE)
if '--user-site' in args:
buffer.append(USER_SITE)
if buffer:
print(os.pathsep.join(buffer))
if ENABLE_USER_SITE:
sys.exit(0)
elif ENABLE_USER_SITE is False:
sys.exit(1)
elif ENABLE_USER_SITE is None:
sys.exit(2)
else:
sys.exit(3)
else:
import textwrap
print(textwrap.dedent(help % (sys.argv[0], os.pathsep)))
sys.exit(10)
if __name__ == '__main__':
_script()
| gpl-2.0 |
myfunprograms/deep_learning | project5/files/problem_unittests.py | 159 | 6094 | from copy import deepcopy
from unittest import mock
import tensorflow as tf
def test_safe(func):
"""
Isolate tests: run each wrapped test inside a fresh tf.Graph.
"""
def func_wrapper(*args):
with tf.Graph().as_default():
result = func(*args)
print('Tests Passed')
return result
return func_wrapper
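# Sketch of intended usage: decorating a test runs it inside a fresh
# tf.Graph, so ops created by one test cannot leak into another:
#
# @test_safe
# def test_something():
# tf.constant(0) # lives only in this test's graph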
def _assert_tensor_shape(tensor, shape, display_name):
assert tf.assert_rank(tensor, len(shape), message='{} has wrong rank'.format(display_name))
tensor_shape = tensor.get_shape().as_list() if len(shape) else []
wrong_dimension = [ten_dim for ten_dim, cor_dim in zip(tensor_shape, shape)
if cor_dim is not None and ten_dim != cor_dim]
assert not wrong_dimension, \
'{} has wrong shape. Found {}'.format(display_name, tensor_shape)
def _check_input(tensor, shape, display_name, tf_name=None):
assert tensor.op.type == 'Placeholder', \
'{} is not a Placeholder.'.format(display_name)
_assert_tensor_shape(tensor, shape, display_name)
if tf_name:
assert tensor.name == tf_name, \
'{} has bad name. Found name {}'.format(display_name, tensor.name)
class TmpMock():
"""
Mock an attribute. Restore the original attribute when exiting scope.
"""
def __init__(self, module, attrib_name):
self.original_attrib = deepcopy(getattr(module, attrib_name))
setattr(module, attrib_name, mock.MagicMock())
self.module = module
self.attrib_name = attrib_name
def __enter__(self):
return getattr(self.module, self.attrib_name)
def __exit__(self, type, value, traceback):
setattr(self.module, self.attrib_name, self.original_attrib)
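# Typical usage, mirroring the tests below: inside the `with` block the
# attribute is a MagicMock, and the original is restored on exit:
#
# with TmpMock(tf, 'variable_scope') as mock_vs:
# ... # assertions against mock_vs.call_args etc.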
@test_safe
def test_model_inputs(model_inputs):
image_width = 28
image_height = 28
image_channels = 3
z_dim = 100
input_real, input_z, learn_rate = model_inputs(image_width, image_height, image_channels, z_dim)
_check_input(input_real, [None, image_width, image_height, image_channels], 'Real Input')
_check_input(input_z, [None, z_dim], 'Z Input')
_check_input(learn_rate, [], 'Learning Rate')
@test_safe
def test_discriminator(discriminator, tf_module):
with TmpMock(tf_module, 'variable_scope') as mock_variable_scope:
image = tf.placeholder(tf.float32, [None, 28, 28, 3])
output, logits = discriminator(image)
_assert_tensor_shape(output, [None, 1], 'Discriminator Training(reuse=false) output')
_assert_tensor_shape(logits, [None, 1], 'Discriminator Training(reuse=false) Logits')
assert mock_variable_scope.called,\
'tf.variable_scope not called in Discriminator Training(reuse=false)'
assert mock_variable_scope.call_args == mock.call('discriminator', reuse=False), \
'tf.variable_scope called with wrong arguments in Discriminator Training(reuse=false)'
mock_variable_scope.reset_mock()
output_reuse, logits_reuse = discriminator(image, True)
_assert_tensor_shape(output_reuse, [None, 1], 'Discriminator Inference(reuse=True) output')
_assert_tensor_shape(logits_reuse, [None, 1], 'Discriminator Inference(reuse=True) Logits')
assert mock_variable_scope.called, \
'tf.variable_scope not called in Discriminator Inference(reuse=True)'
assert mock_variable_scope.call_args == mock.call('discriminator', reuse=True), \
'tf.variable_scope called with wrong arguments in Discriminator Inference(reuse=True)'
@test_safe
def test_generator(generator, tf_module):
with TmpMock(tf_module, 'variable_scope') as mock_variable_scope:
z = tf.placeholder(tf.float32, [None, 100])
out_channel_dim = 5
output = generator(z, out_channel_dim)
_assert_tensor_shape(output, [None, 28, 28, out_channel_dim], 'Generator output (is_train=True)')
assert mock_variable_scope.called, \
'tf.variable_scope not called in Generator Training(reuse=false)'
assert mock_variable_scope.call_args == mock.call('generator', reuse=False), \
'tf.variable_scope called with wrong arguments in Generator Training(reuse=false)'
mock_variable_scope.reset_mock()
output = generator(z, out_channel_dim, False)
_assert_tensor_shape(output, [None, 28, 28, out_channel_dim], 'Generator output (is_train=False)')
assert mock_variable_scope.called, \
'tf.variable_scope not called in Generator Inference(reuse=True)'
assert mock_variable_scope.call_args == mock.call('generator', reuse=True), \
'tf.variable_scope called with wrong arguments in Generator Inference(reuse=True)'
@test_safe
def test_model_loss(model_loss):
out_channel_dim = 4
input_real = tf.placeholder(tf.float32, [None, 28, 28, out_channel_dim])
input_z = tf.placeholder(tf.float32, [None, 100])
d_loss, g_loss = model_loss(input_real, input_z, out_channel_dim)
_assert_tensor_shape(d_loss, [], 'Discriminator Loss')
_assert_tensor_shape(d_loss, [], 'Generator Loss')
@test_safe
def test_model_opt(model_opt, tf_module):
with TmpMock(tf_module, 'trainable_variables') as mock_trainable_variables:
with tf.variable_scope('discriminator'):
discriminator_logits = tf.Variable(tf.zeros([3, 3]))
with tf.variable_scope('generator'):
generator_logits = tf.Variable(tf.zeros([3, 3]))
mock_trainable_variables.return_value = [discriminator_logits, generator_logits]
d_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
logits=discriminator_logits,
labels=[[0.0, 0.0, 1.0], [0.0, 1.0, 0.0], [1.0, 0.0, 0.0]]))
g_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
logits=generator_logits,
labels=[[0.0, 0.0, 1.0], [0.0, 1.0, 0.0], [1.0, 0.0, 0.0]]))
learning_rate = 0.001
beta1 = 0.9
d_train_opt, g_train_opt = model_opt(d_loss, g_loss, learning_rate, beta1)
assert mock_trainable_variables.called,\
'tf.mock_trainable_variables not called'
| gpl-3.0 |
catkin/xylem | setup.py | 1 | 3476 | #!/usr/bin/env python
from setuptools import setup, find_packages
# TODO: read README and LICENSE files to compose "long description"
#
# This might be useful:
# http://stackoverflow.com/questions/1192632/how-to-convert-restructuredtext-to-plain-text
# http://www.jeffknupp.com/blog/2013/08/16/open-sourcing-a-python-project-the-right-way/
# see: http://reinout.vanrees.org/weblog/2009/12/17/managing-dependencies.html
tests_require = ['nose', 'flake8', 'mock', 'coverage', 'testfixtures']
setup(
name='xylem',
version='0.1.0',
packages=find_packages(exclude=['test']),
package_data={
'xylem.sources': [
'xylem/sources/sources.d/*'
]
},
include_package_data=True,
install_requires=[
'six',
'PyYAML',
'argparse',
],
tests_require=tests_require,
extras_require={'test': tests_require},
author='Nikolaus Demmel',
author_email='[email protected]',
maintainer='Nikolaus Demmel',
maintainer_email='[email protected]',
url='https://github.com/catkin/xylem',
keywords=['caktin', 'bloom', 'package manager'],
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Natural Language :: English',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Software Development',
'Topic :: System :: Installation/Setup',
],
description="xylem is a package manager abstraction tool.",
long_description="xylem is a package manager abstraction tool.",
license='Apache License 2.0',
test_suite='test',
entry_points={
'console_scripts': [
'xylem = xylem.commands.main:main',
'xylem-update = xylem.commands.update:main',
],
'xylem.commands': [
'update = xylem.commands.update:definition',
'resolve = xylem.commands.resolve:definition',
'lookup = xylem.commands.lookup:definition',
'install = xylem.commands.install:definition',
'_compact_rules_file = xylem.commands._compact_rules_file:definition',
],
'xylem.specs': [
'rules = xylem.specs.plugins.rules:definition',
],
'xylem.os': [
'debian = xylem.os_support.plugins:debian_definition',
'ubuntu = xylem.os_support.plugins:ubuntu_definition',
'xubuntu = xylem.os_support.plugins:xubuntu_definition',
'osx = xylem.os_support.plugins:osx_definition',
],
'xylem.installers': [
'fake = xylem.installers.plugins.fake:definition',
'apt = xylem.installers.plugins.apt:definition',
'homebrew = xylem.installers.plugins.homebrew:definition',
'macports = xylem.installers.plugins.macports:definition',
'pip = xylem.installers.plugins.pip:definition',
],
}
)
| apache-2.0 |
cyx1231st/nova | nova/api/openstack/compute/legacy_v2/contrib/server_groups.py | 15 | 9330 | # Copyright (c) 2014 Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The Server Group API Extension."""
from oslo_log import log as logging
import webob
from webob import exc
from nova.api.openstack import common
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.validation import parameter_types
import nova.exception
from nova.i18n import _
from nova.i18n import _LE
from nova import objects
from nova import utils
LOG = logging.getLogger(__name__)
SUPPORTED_POLICIES = ['anti-affinity', 'affinity']
authorize = extensions.extension_authorizer('compute', 'server_groups')
def _authorize_context(req):
context = req.environ['nova.context']
authorize(context)
return context
class ServerGroupController(wsgi.Controller):
"""The Server group API controller for the OpenStack API."""
def __init__(self, ext_mgr):
self.ext_mgr = ext_mgr
def _format_server_group(self, context, group):
# the id field has its value as the uuid of the server group
# There is no 'uuid' key in server_group seen by clients.
# In addition, clients see policies as a ["policy-name"] list;
# and they see members as a ["server-id"] list.
server_group = {}
server_group['id'] = group.uuid
server_group['name'] = group.name
server_group['policies'] = group.policies or []
# NOTE(danms): This has been exposed to the user, but never used.
# Since we can't remove it, just make sure it's always empty.
server_group['metadata'] = {}
members = []
if group.members:
# Display the instances that are not deleted.
filters = {'uuid': group.members, 'deleted': False}
instances = objects.InstanceList.get_by_filters(
context, filters=filters)
members = [instance.uuid for instance in instances]
server_group['members'] = members
return server_group
def _validate_policies(self, policies):
"""Validate the policies.
Validates that there are no contradicting policies, for example
'anti-affinity' and 'affinity' in the same group.
Validates that the defined policies are supported.
:param policies: the given policies of the server_group
"""
if ('anti-affinity' in policies and
'affinity' in policies):
msg = _("Conflicting policies configured!")
raise nova.exception.InvalidInput(reason=msg)
not_supported = [policy for policy in policies
if policy not in SUPPORTED_POLICIES]
if not_supported:
msg = _("Invalid policies: %s") % ', '.join(not_supported)
raise nova.exception.InvalidInput(reason=msg)
# Note(wingwj): It doesn't make sense to store duplicate policies.
if sorted(set(policies)) != sorted(policies):
msg = _("Duplicate policies configured!")
raise nova.exception.InvalidInput(reason=msg)
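# For illustration: ['affinity', 'anti-affinity'] fails the conflict
# check, ['foo'] fails the supported-policy check, and
# ['affinity', 'affinity'] fails the duplicate check, each raising
# nova.exception.InvalidInput.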
def _validate_input_body(self, body, entity_name):
if not self.is_valid_body(body, entity_name):
msg = _("the body is invalid.")
raise nova.exception.InvalidInput(reason=msg)
subbody = dict(body[entity_name])
expected_fields = ['name', 'policies']
for field in expected_fields:
value = subbody.pop(field, None)
if not value:
msg = _("'%s' is either missing or empty.") % field
raise nova.exception.InvalidInput(reason=msg)
if field == 'name':
utils.check_string_length(value, field,
min_length=1, max_length=255)
if not parameter_types.valid_name_regex_obj.search(value):
msg = _("Invalid format for name: '%s'") % value
raise nova.exception.InvalidInput(reason=msg)
elif field == 'policies':
if isinstance(value, list):
[utils.check_string_length(v, field,
min_length=1, max_length=255) for v in value]
self._validate_policies(value)
else:
msg = _("'%s' is not a list") % value
raise nova.exception.InvalidInput(reason=msg)
if subbody:
msg = _("unsupported fields: %s") % subbody.keys()
raise nova.exception.InvalidInput(reason=msg)
def show(self, req, id):
"""Return data about the given server group."""
context = _authorize_context(req)
try:
sg = objects.InstanceGroup.get_by_uuid(context, id)
except nova.exception.InstanceGroupNotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
return {'server_group': self._format_server_group(context, sg)}
def delete(self, req, id):
"""Delete an server group."""
context = _authorize_context(req)
try:
sg = objects.InstanceGroup.get_by_uuid(context, id)
except nova.exception.InstanceGroupNotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
quotas = None
if self.ext_mgr.is_loaded('os-server-group-quotas'):
quotas = objects.Quotas(context=context)
project_id, user_id = objects.quotas.ids_from_server_group(context,
sg)
try:
# We have to add the quota back to the user that created
# the server group
quotas.reserve(project_id=project_id,
user_id=user_id, server_groups=-1)
except Exception:
quotas = None
LOG.exception(_LE("Failed to update usages deallocating "
"server group"))
try:
sg.destroy()
except nova.exception.InstanceGroupNotFound as e:
if quotas:
quotas.rollback()
raise webob.exc.HTTPNotFound(explanation=e.format_message())
if quotas:
quotas.commit()
return webob.Response(status_int=204)
def index(self, req):
"""Returns a list of server groups."""
context = _authorize_context(req)
project_id = context.project_id
if 'all_projects' in req.GET and context.is_admin:
sgs = objects.InstanceGroupList.get_all(context)
else:
sgs = objects.InstanceGroupList.get_by_project_id(
context, project_id)
limited_list = common.limited(sgs.objects, req)
result = [self._format_server_group(context, group)
for group in limited_list]
return {'server_groups': result}
def create(self, req, body):
"""Creates a new server group."""
context = _authorize_context(req)
try:
self._validate_input_body(body, 'server_group')
except nova.exception.InvalidInput as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
quotas = None
if self.ext_mgr.is_loaded('os-server-group-quotas'):
quotas = objects.Quotas(context=context)
try:
quotas.reserve(project_id=context.project_id,
user_id=context.user_id, server_groups=1)
except nova.exception.OverQuota:
msg = _("Quota exceeded, too many server groups.")
raise exc.HTTPForbidden(explanation=msg)
vals = body['server_group']
sg = objects.InstanceGroup(context)
sg.project_id = context.project_id
sg.user_id = context.user_id
try:
sg.name = vals.get('name')
sg.policies = vals.get('policies')
sg.create()
except ValueError as e:
if quotas:
quotas.rollback()
raise exc.HTTPBadRequest(explanation=e)
if quotas:
quotas.commit()
return {'server_group': self._format_server_group(context, sg)}
class Server_groups(extensions.ExtensionDescriptor):
"""Server group support."""
name = "ServerGroups"
alias = "os-server-groups"
namespace = ("http://docs.openstack.org/compute/ext/"
"servergroups/api/v2")
updated = "2013-06-20T00:00:00Z"
def get_resources(self):
resources = []
res = extensions.ResourceExtension(
'os-server-groups',
controller=ServerGroupController(self.ext_mgr),
member_actions={"action": "POST", })
resources.append(res)
return resources
| apache-2.0 |
krunal3103/servo | tests/wpt/web-platform-tests/tools/pytest/testing/code/test_excinfo.py | 165 | 30814 | # -*- coding: utf-8 -*-
import _pytest
import py
import pytest
from _pytest._code.code import FormattedExcinfo, ReprExceptionInfo
queue = py.builtin._tryimport('queue', 'Queue')
failsonjython = pytest.mark.xfail("sys.platform.startswith('java')")
from test_source import astonly
try:
import importlib
except ImportError:
invalidate_import_caches = None
else:
invalidate_import_caches = getattr(importlib, "invalidate_caches", None)
pytest_version_info = tuple(map(int, pytest.__version__.split(".")[:3]))
class TWMock:
def __init__(self):
self.lines = []
def sep(self, sep, line=None):
self.lines.append((sep, line))
def line(self, line, **kw):
self.lines.append(line)
def markup(self, text, **kw):
return text
fullwidth = 80
def test_excinfo_simple():
try:
raise ValueError
except ValueError:
info = _pytest._code.ExceptionInfo()
assert info.type == ValueError
def test_excinfo_getstatement():
def g():
raise ValueError
def f():
g()
try:
f()
except ValueError:
excinfo = _pytest._code.ExceptionInfo()
linenumbers = [_pytest._code.getrawcode(f).co_firstlineno - 1 + 3,
_pytest._code.getrawcode(f).co_firstlineno - 1 + 1,
_pytest._code.getrawcode(g).co_firstlineno - 1 + 1, ]
l = list(excinfo.traceback)
foundlinenumbers = [x.lineno for x in l]
assert foundlinenumbers == linenumbers
#for x in info:
# print "%s:%d %s" %(x.path.relto(root), x.lineno, x.statement)
#xxx
# testchain for getentries test below
def f():
#
raise ValueError
#
def g():
#
__tracebackhide__ = True
f()
#
def h():
#
g()
#
class TestTraceback_f_g_h:
def setup_method(self, method):
try:
h()
except ValueError:
self.excinfo = _pytest._code.ExceptionInfo()
def test_traceback_entries(self):
tb = self.excinfo.traceback
entries = list(tb)
assert len(tb) == 4 # maybe fragile test
assert len(entries) == 4 # maybe fragile test
names = ['f', 'g', 'h']
for entry in entries:
try:
names.remove(entry.frame.code.name)
except ValueError:
pass
assert not names
def test_traceback_entry_getsource(self):
tb = self.excinfo.traceback
        s = str(tb[-1].getsource())
assert s.startswith("def f():")
assert s.endswith("raise ValueError")
@astonly
@failsonjython
def test_traceback_entry_getsource_in_construct(self):
source = _pytest._code.Source("""\
def xyz():
try:
raise ValueError
except somenoname:
pass
xyz()
""")
try:
exec (source.compile())
except NameError:
tb = _pytest._code.ExceptionInfo().traceback
print (tb[-1].getsource())
s = str(tb[-1].getsource())
assert s.startswith("def xyz():\n try:")
assert s.strip().endswith("except somenoname:")
def test_traceback_cut(self):
co = _pytest._code.Code(f)
path, firstlineno = co.path, co.firstlineno
traceback = self.excinfo.traceback
newtraceback = traceback.cut(path=path, firstlineno=firstlineno)
assert len(newtraceback) == 1
newtraceback = traceback.cut(path=path, lineno=firstlineno+2)
assert len(newtraceback) == 1
def test_traceback_cut_excludepath(self, testdir):
p = testdir.makepyfile("def f(): raise ValueError")
excinfo = pytest.raises(ValueError, "p.pyimport().f()")
basedir = py.path.local(pytest.__file__).dirpath()
newtraceback = excinfo.traceback.cut(excludepath=basedir)
for x in newtraceback:
if hasattr(x, 'path'):
assert not py.path.local(x.path).relto(basedir)
assert newtraceback[-1].frame.code.path == p
def test_traceback_filter(self):
traceback = self.excinfo.traceback
ntraceback = traceback.filter()
assert len(ntraceback) == len(traceback) - 1
def test_traceback_recursion_index(self):
def f(n):
if n < 10:
n += 1
f(n)
excinfo = pytest.raises(RuntimeError, f, 8)
traceback = excinfo.traceback
recindex = traceback.recursionindex()
assert recindex == 3
def test_traceback_only_specific_recursion_errors(self, monkeypatch):
def f(n):
if n == 0:
raise RuntimeError("hello")
f(n-1)
excinfo = pytest.raises(RuntimeError, f, 100)
monkeypatch.delattr(excinfo.traceback.__class__, "recursionindex")
repr = excinfo.getrepr()
assert "RuntimeError: hello" in str(repr.reprcrash)
def test_traceback_no_recursion_index(self):
def do_stuff():
raise RuntimeError
def reraise_me():
import sys
exc, val, tb = sys.exc_info()
py.builtin._reraise(exc, val, tb)
def f(n):
try:
do_stuff()
except:
reraise_me()
excinfo = pytest.raises(RuntimeError, f, 8)
traceback = excinfo.traceback
recindex = traceback.recursionindex()
assert recindex is None
def test_traceback_messy_recursion(self):
#XXX: simplified locally testable version
decorator = pytest.importorskip('decorator').decorator
def log(f, *k, **kw):
print('%s %s' % (k, kw))
f(*k, **kw)
log = decorator(log)
def fail():
raise ValueError('')
fail = log(log(fail))
excinfo = pytest.raises(ValueError, fail)
assert excinfo.traceback.recursionindex() is None
def test_traceback_getcrashentry(self):
def i():
__tracebackhide__ = True
raise ValueError
def h():
i()
def g():
__tracebackhide__ = True
h()
def f():
g()
excinfo = pytest.raises(ValueError, f)
tb = excinfo.traceback
entry = tb.getcrashentry()
co = _pytest._code.Code(h)
assert entry.frame.code.path == co.path
assert entry.lineno == co.firstlineno + 1
assert entry.frame.code.name == 'h'
def test_traceback_getcrashentry_empty(self):
def g():
__tracebackhide__ = True
raise ValueError
def f():
__tracebackhide__ = True
g()
excinfo = pytest.raises(ValueError, f)
tb = excinfo.traceback
entry = tb.getcrashentry()
co = _pytest._code.Code(g)
assert entry.frame.code.path == co.path
assert entry.lineno == co.firstlineno + 2
assert entry.frame.code.name == 'g'
def hello(x):
x + 5
def test_tbentry_reinterpret():
try:
hello("hello")
except TypeError:
excinfo = _pytest._code.ExceptionInfo()
tbentry = excinfo.traceback[-1]
msg = tbentry.reinterpret()
assert msg.startswith("TypeError: ('hello' + 5)")
def test_excinfo_exconly():
excinfo = pytest.raises(ValueError, h)
assert excinfo.exconly().startswith('ValueError')
excinfo = pytest.raises(ValueError,
"raise ValueError('hello\\nworld')")
msg = excinfo.exconly(tryshort=True)
assert msg.startswith('ValueError')
assert msg.endswith("world")
def test_excinfo_repr():
excinfo = pytest.raises(ValueError, h)
s = repr(excinfo)
assert s == "<ExceptionInfo ValueError tblen=4>"
def test_excinfo_str():
excinfo = pytest.raises(ValueError, h)
s = str(excinfo)
assert s.startswith(__file__[:-9]) # pyc file and $py.class
assert s.endswith("ValueError")
assert len(s.split(":")) >= 3 # on windows it's 4
def test_excinfo_errisinstance():
excinfo = pytest.raises(ValueError, h)
assert excinfo.errisinstance(ValueError)
def test_excinfo_no_sourcecode():
try:
exec ("raise ValueError()")
except ValueError:
excinfo = _pytest._code.ExceptionInfo()
s = str(excinfo.traceback[-1])
if py.std.sys.version_info < (2,5):
assert s == " File '<string>':1 in ?\n ???\n"
else:
assert s == " File '<string>':1 in <module>\n ???\n"
def test_excinfo_no_python_sourcecode(tmpdir):
#XXX: simplified locally testable version
tmpdir.join('test.txt').write("{{ h()}}:")
jinja2 = pytest.importorskip('jinja2')
loader = jinja2.FileSystemLoader(str(tmpdir))
env = jinja2.Environment(loader=loader)
template = env.get_template('test.txt')
excinfo = pytest.raises(ValueError,
template.render, h=h)
for item in excinfo.traceback:
print(item) #XXX: for some reason jinja.Template.render is printed in full
        item.source  # shouldn't fail
if item.path.basename == 'test.txt':
assert str(item.source) == '{{ h()}}:'
def test_entrysource_Queue_example():
try:
queue.Queue().get(timeout=0.001)
except queue.Empty:
excinfo = _pytest._code.ExceptionInfo()
entry = excinfo.traceback[-1]
source = entry.getsource()
assert source is not None
s = str(source).strip()
assert s.startswith("def get")
def test_codepath_Queue_example():
try:
queue.Queue().get(timeout=0.001)
except queue.Empty:
excinfo = _pytest._code.ExceptionInfo()
entry = excinfo.traceback[-1]
path = entry.path
assert isinstance(path, py.path.local)
assert path.basename.lower() == "queue.py"
assert path.check()
class TestFormattedExcinfo:
def pytest_funcarg__importasmod(self, request):
def importasmod(source):
source = _pytest._code.Source(source)
tmpdir = request.getfuncargvalue("tmpdir")
modpath = tmpdir.join("mod.py")
tmpdir.ensure("__init__.py")
modpath.write(source)
if invalidate_import_caches is not None:
invalidate_import_caches()
return modpath.pyimport()
return importasmod
def excinfo_from_exec(self, source):
source = _pytest._code.Source(source).strip()
try:
exec (source.compile())
except KeyboardInterrupt:
raise
except:
return _pytest._code.ExceptionInfo()
assert 0, "did not raise"
def test_repr_source(self):
pr = FormattedExcinfo()
source = _pytest._code.Source("""
def f(x):
pass
""").strip()
pr.flow_marker = "|"
lines = pr.get_source(source, 0)
assert len(lines) == 2
assert lines[0] == "| def f(x):"
assert lines[1] == " pass"
def test_repr_source_excinfo(self):
""" check if indentation is right """
pr = FormattedExcinfo()
excinfo = self.excinfo_from_exec("""
def f():
assert 0
f()
""")
pr = FormattedExcinfo()
source = pr._getentrysource(excinfo.traceback[-1])
lines = pr.get_source(source, 1, excinfo)
assert lines == [
' def f():',
'> assert 0',
'E assert 0'
]
def test_repr_source_not_existing(self):
pr = FormattedExcinfo()
co = compile("raise ValueError()", "", "exec")
try:
exec (co)
except ValueError:
excinfo = _pytest._code.ExceptionInfo()
repr = pr.repr_excinfo(excinfo)
assert repr.reprtraceback.reprentries[1].lines[0] == "> ???"
def test_repr_many_line_source_not_existing(self):
pr = FormattedExcinfo()
co = compile("""
a = 1
raise ValueError()
""", "", "exec")
try:
exec (co)
except ValueError:
excinfo = _pytest._code.ExceptionInfo()
repr = pr.repr_excinfo(excinfo)
assert repr.reprtraceback.reprentries[1].lines[0] == "> ???"
def test_repr_source_failing_fullsource(self):
pr = FormattedExcinfo()
class FakeCode(object):
class raw:
co_filename = '?'
path = '?'
firstlineno = 5
def fullsource(self):
return None
fullsource = property(fullsource)
class FakeFrame(object):
code = FakeCode()
f_locals = {}
f_globals = {}
class FakeTracebackEntry(_pytest._code.Traceback.Entry):
def __init__(self, tb):
self.lineno = 5+3
@property
def frame(self):
return FakeFrame()
class Traceback(_pytest._code.Traceback):
Entry = FakeTracebackEntry
class FakeExcinfo(_pytest._code.ExceptionInfo):
typename = "Foo"
def __init__(self):
pass
def exconly(self, tryshort):
return "EXC"
def errisinstance(self, cls):
return False
excinfo = FakeExcinfo()
class FakeRawTB(object):
tb_next = None
tb = FakeRawTB()
excinfo.traceback = Traceback(tb)
fail = IOError() # noqa
repr = pr.repr_excinfo(excinfo)
assert repr.reprtraceback.reprentries[0].lines[0] == "> ???"
fail = py.error.ENOENT # noqa
repr = pr.repr_excinfo(excinfo)
assert repr.reprtraceback.reprentries[0].lines[0] == "> ???"
def test_repr_local(self):
p = FormattedExcinfo(showlocals=True)
loc = {'y': 5, 'z': 7, 'x': 3, '@x': 2, '__builtins__': {}}
reprlocals = p.repr_locals(loc)
assert reprlocals.lines
assert reprlocals.lines[0] == '__builtins__ = <builtins>'
assert reprlocals.lines[1] == 'x = 3'
assert reprlocals.lines[2] == 'y = 5'
assert reprlocals.lines[3] == 'z = 7'
def test_repr_tracebackentry_lines(self, importasmod):
mod = importasmod("""
def func1():
raise ValueError("hello\\nworld")
""")
excinfo = pytest.raises(ValueError, mod.func1)
excinfo.traceback = excinfo.traceback.filter()
p = FormattedExcinfo()
reprtb = p.repr_traceback_entry(excinfo.traceback[-1])
# test as intermittent entry
lines = reprtb.lines
assert lines[0] == ' def func1():'
assert lines[1] == '> raise ValueError("hello\\nworld")'
# test as last entry
p = FormattedExcinfo(showlocals=True)
repr_entry = p.repr_traceback_entry(excinfo.traceback[-1], excinfo)
lines = repr_entry.lines
assert lines[0] == ' def func1():'
assert lines[1] == '> raise ValueError("hello\\nworld")'
assert lines[2] == 'E ValueError: hello'
assert lines[3] == 'E world'
assert not lines[4:]
loc = repr_entry.reprlocals is not None
loc = repr_entry.reprfileloc
assert loc.path == mod.__file__
assert loc.lineno == 3
#assert loc.message == "ValueError: hello"
def test_repr_tracebackentry_lines2(self, importasmod):
mod = importasmod("""
def func1(m, x, y, z):
raise ValueError("hello\\nworld")
""")
excinfo = pytest.raises(ValueError, mod.func1, "m"*90, 5, 13, "z"*120)
excinfo.traceback = excinfo.traceback.filter()
entry = excinfo.traceback[-1]
p = FormattedExcinfo(funcargs=True)
reprfuncargs = p.repr_args(entry)
assert reprfuncargs.args[0] == ('m', repr("m"*90))
assert reprfuncargs.args[1] == ('x', '5')
assert reprfuncargs.args[2] == ('y', '13')
assert reprfuncargs.args[3] == ('z', repr("z" * 120))
p = FormattedExcinfo(funcargs=True)
repr_entry = p.repr_traceback_entry(entry)
assert repr_entry.reprfuncargs.args == reprfuncargs.args
tw = TWMock()
repr_entry.toterminal(tw)
assert tw.lines[0] == "m = " + repr('m' * 90)
assert tw.lines[1] == "x = 5, y = 13"
assert tw.lines[2] == "z = " + repr('z' * 120)
def test_repr_tracebackentry_lines_var_kw_args(self, importasmod):
mod = importasmod("""
def func1(x, *y, **z):
raise ValueError("hello\\nworld")
""")
excinfo = pytest.raises(ValueError, mod.func1, 'a', 'b', c='d')
excinfo.traceback = excinfo.traceback.filter()
entry = excinfo.traceback[-1]
p = FormattedExcinfo(funcargs=True)
reprfuncargs = p.repr_args(entry)
assert reprfuncargs.args[0] == ('x', repr('a'))
assert reprfuncargs.args[1] == ('y', repr(('b',)))
assert reprfuncargs.args[2] == ('z', repr({'c': 'd'}))
p = FormattedExcinfo(funcargs=True)
repr_entry = p.repr_traceback_entry(entry)
assert repr_entry.reprfuncargs.args == reprfuncargs.args
tw = TWMock()
repr_entry.toterminal(tw)
assert tw.lines[0] == "x = 'a', y = ('b',), z = {'c': 'd'}"
def test_repr_tracebackentry_short(self, importasmod):
mod = importasmod("""
def func1():
raise ValueError("hello")
def entry():
func1()
""")
excinfo = pytest.raises(ValueError, mod.entry)
p = FormattedExcinfo(style="short")
reprtb = p.repr_traceback_entry(excinfo.traceback[-2])
lines = reprtb.lines
basename = py.path.local(mod.__file__).basename
assert lines[0] == ' func1()'
assert basename in str(reprtb.reprfileloc.path)
assert reprtb.reprfileloc.lineno == 5
# test last entry
p = FormattedExcinfo(style="short")
reprtb = p.repr_traceback_entry(excinfo.traceback[-1], excinfo)
lines = reprtb.lines
assert lines[0] == ' raise ValueError("hello")'
assert lines[1] == 'E ValueError: hello'
assert basename in str(reprtb.reprfileloc.path)
assert reprtb.reprfileloc.lineno == 3
def test_repr_tracebackentry_no(self, importasmod):
mod = importasmod("""
def func1():
raise ValueError("hello")
def entry():
func1()
""")
excinfo = pytest.raises(ValueError, mod.entry)
p = FormattedExcinfo(style="no")
p.repr_traceback_entry(excinfo.traceback[-2])
p = FormattedExcinfo(style="no")
reprentry = p.repr_traceback_entry(excinfo.traceback[-1], excinfo)
lines = reprentry.lines
assert lines[0] == 'E ValueError: hello'
assert not lines[1:]
def test_repr_traceback_tbfilter(self, importasmod):
mod = importasmod("""
def f(x):
raise ValueError(x)
def entry():
f(0)
""")
excinfo = pytest.raises(ValueError, mod.entry)
p = FormattedExcinfo(tbfilter=True)
reprtb = p.repr_traceback(excinfo)
assert len(reprtb.reprentries) == 2
p = FormattedExcinfo(tbfilter=False)
reprtb = p.repr_traceback(excinfo)
assert len(reprtb.reprentries) == 3
def test_traceback_short_no_source(self, importasmod, monkeypatch):
mod = importasmod("""
def func1():
raise ValueError("hello")
def entry():
func1()
""")
excinfo = pytest.raises(ValueError, mod.entry)
from _pytest._code.code import Code
monkeypatch.setattr(Code, 'path', 'bogus')
excinfo.traceback[0].frame.code.path = "bogus"
p = FormattedExcinfo(style="short")
reprtb = p.repr_traceback_entry(excinfo.traceback[-2])
lines = reprtb.lines
last_p = FormattedExcinfo(style="short")
last_reprtb = last_p.repr_traceback_entry(excinfo.traceback[-1], excinfo)
last_lines = last_reprtb.lines
monkeypatch.undo()
assert lines[0] == ' func1()'
assert last_lines[0] == ' raise ValueError("hello")'
assert last_lines[1] == 'E ValueError: hello'
def test_repr_traceback_and_excinfo(self, importasmod):
mod = importasmod("""
def f(x):
raise ValueError(x)
def entry():
f(0)
""")
excinfo = pytest.raises(ValueError, mod.entry)
for style in ("long", "short"):
p = FormattedExcinfo(style=style)
reprtb = p.repr_traceback(excinfo)
assert len(reprtb.reprentries) == 2
assert reprtb.style == style
assert not reprtb.extraline
repr = p.repr_excinfo(excinfo)
assert repr.reprtraceback
assert len(repr.reprtraceback.reprentries) == len(reprtb.reprentries)
assert repr.reprcrash.path.endswith("mod.py")
assert repr.reprcrash.message == "ValueError: 0"
def test_repr_traceback_with_invalid_cwd(self, importasmod, monkeypatch):
mod = importasmod("""
def f(x):
raise ValueError(x)
def entry():
f(0)
""")
excinfo = pytest.raises(ValueError, mod.entry)
p = FormattedExcinfo()
def raiseos():
raise OSError(2)
monkeypatch.setattr(py.std.os, 'getcwd', raiseos)
assert p._makepath(__file__) == __file__
p.repr_traceback(excinfo)
def test_repr_excinfo_addouterr(self, importasmod):
mod = importasmod("""
def entry():
raise ValueError()
""")
excinfo = pytest.raises(ValueError, mod.entry)
repr = excinfo.getrepr()
repr.addsection("title", "content")
twmock = TWMock()
repr.toterminal(twmock)
assert twmock.lines[-1] == "content"
assert twmock.lines[-2] == ("-", "title")
def test_repr_excinfo_reprcrash(self, importasmod):
mod = importasmod("""
def entry():
raise ValueError()
""")
excinfo = pytest.raises(ValueError, mod.entry)
repr = excinfo.getrepr()
assert repr.reprcrash.path.endswith("mod.py")
assert repr.reprcrash.lineno == 3
assert repr.reprcrash.message == "ValueError"
assert str(repr.reprcrash).endswith("mod.py:3: ValueError")
def test_repr_traceback_recursion(self, importasmod):
mod = importasmod("""
def rec2(x):
return rec1(x+1)
def rec1(x):
return rec2(x-1)
def entry():
rec1(42)
""")
excinfo = pytest.raises(RuntimeError, mod.entry)
for style in ("short", "long", "no"):
p = FormattedExcinfo(style="short")
reprtb = p.repr_traceback(excinfo)
assert reprtb.extraline == "!!! Recursion detected (same locals & position)"
assert str(reprtb)
def test_tb_entry_AssertionError(self, importasmod):
# probably this test is a bit redundant
# as py/magic/testing/test_assertion.py
# already tests correctness of
# assertion-reinterpretation logic
mod = importasmod("""
def somefunc():
x = 1
assert x == 2
""")
excinfo = pytest.raises(AssertionError, mod.somefunc)
p = FormattedExcinfo()
reprentry = p.repr_traceback_entry(excinfo.traceback[-1], excinfo)
lines = reprentry.lines
assert lines[-1] == "E assert 1 == 2"
def test_reprexcinfo_getrepr(self, importasmod):
mod = importasmod("""
def f(x):
raise ValueError(x)
def entry():
f(0)
""")
excinfo = pytest.raises(ValueError, mod.entry)
for style in ("short", "long", "no"):
for showlocals in (True, False):
repr = excinfo.getrepr(style=style, showlocals=showlocals)
assert isinstance(repr, ReprExceptionInfo)
assert repr.reprtraceback.style == style
def test_reprexcinfo_unicode(self):
from _pytest._code.code import TerminalRepr
class MyRepr(TerminalRepr):
def toterminal(self, tw):
tw.line(py.builtin._totext("я", "utf-8"))
x = py.builtin._totext(MyRepr())
assert x == py.builtin._totext("я", "utf-8")
def test_toterminal_long(self, importasmod):
mod = importasmod("""
def g(x):
raise ValueError(x)
def f():
g(3)
""")
excinfo = pytest.raises(ValueError, mod.f)
excinfo.traceback = excinfo.traceback.filter()
repr = excinfo.getrepr()
tw = TWMock()
repr.toterminal(tw)
assert tw.lines[0] == ""
tw.lines.pop(0)
assert tw.lines[0] == " def f():"
assert tw.lines[1] == "> g(3)"
assert tw.lines[2] == ""
assert tw.lines[3].endswith("mod.py:5: ")
assert tw.lines[4] == ("_ ", None)
assert tw.lines[5] == ""
assert tw.lines[6] == " def g(x):"
assert tw.lines[7] == "> raise ValueError(x)"
assert tw.lines[8] == "E ValueError: 3"
assert tw.lines[9] == ""
assert tw.lines[10].endswith("mod.py:3: ValueError")
def test_toterminal_long_missing_source(self, importasmod, tmpdir):
mod = importasmod("""
def g(x):
raise ValueError(x)
def f():
g(3)
""")
excinfo = pytest.raises(ValueError, mod.f)
tmpdir.join('mod.py').remove()
excinfo.traceback = excinfo.traceback.filter()
repr = excinfo.getrepr()
tw = TWMock()
repr.toterminal(tw)
assert tw.lines[0] == ""
tw.lines.pop(0)
assert tw.lines[0] == "> ???"
assert tw.lines[1] == ""
assert tw.lines[2].endswith("mod.py:5: ")
assert tw.lines[3] == ("_ ", None)
assert tw.lines[4] == ""
assert tw.lines[5] == "> ???"
assert tw.lines[6] == "E ValueError: 3"
assert tw.lines[7] == ""
assert tw.lines[8].endswith("mod.py:3: ValueError")
def test_toterminal_long_incomplete_source(self, importasmod, tmpdir):
mod = importasmod("""
def g(x):
raise ValueError(x)
def f():
g(3)
""")
excinfo = pytest.raises(ValueError, mod.f)
tmpdir.join('mod.py').write('asdf')
excinfo.traceback = excinfo.traceback.filter()
repr = excinfo.getrepr()
tw = TWMock()
repr.toterminal(tw)
assert tw.lines[0] == ""
tw.lines.pop(0)
assert tw.lines[0] == "> ???"
assert tw.lines[1] == ""
assert tw.lines[2].endswith("mod.py:5: ")
assert tw.lines[3] == ("_ ", None)
assert tw.lines[4] == ""
assert tw.lines[5] == "> ???"
assert tw.lines[6] == "E ValueError: 3"
assert tw.lines[7] == ""
assert tw.lines[8].endswith("mod.py:3: ValueError")
def test_toterminal_long_filenames(self, importasmod):
mod = importasmod("""
def f():
raise ValueError()
""")
excinfo = pytest.raises(ValueError, mod.f)
tw = TWMock()
path = py.path.local(mod.__file__)
old = path.dirpath().chdir()
try:
repr = excinfo.getrepr(abspath=False)
repr.toterminal(tw)
line = tw.lines[-1]
x = py.path.local().bestrelpath(path)
if len(x) < len(str(path)):
assert line == "mod.py:3: ValueError"
repr = excinfo.getrepr(abspath=True)
repr.toterminal(tw)
line = tw.lines[-1]
assert line == "%s:3: ValueError" %(path,)
finally:
old.chdir()
@pytest.mark.parametrize('reproptions', [
{'style': style, 'showlocals': showlocals,
'funcargs': funcargs, 'tbfilter': tbfilter
} for style in ("long", "short", "no")
for showlocals in (True, False)
for tbfilter in (True, False)
for funcargs in (True, False)])
def test_format_excinfo(self, importasmod, reproptions):
mod = importasmod("""
def g(x):
raise ValueError(x)
def f():
g(3)
""")
excinfo = pytest.raises(ValueError, mod.f)
tw = py.io.TerminalWriter(stringio=True)
repr = excinfo.getrepr(**reproptions)
repr.toterminal(tw)
assert tw.stringio.getvalue()
def test_native_style(self):
excinfo = self.excinfo_from_exec("""
assert 0
""")
repr = excinfo.getrepr(style='native')
assert "assert 0" in str(repr.reprcrash)
s = str(repr)
assert s.startswith('Traceback (most recent call last):\n File')
assert s.endswith('\nAssertionError: assert 0')
assert 'exec (source.compile())' in s
# python 2.4 fails to get the source line for the assert
if py.std.sys.version_info >= (2, 5):
assert s.count('assert 0') == 2
def test_traceback_repr_style(self, importasmod):
mod = importasmod("""
def f():
g()
def g():
h()
def h():
i()
def i():
raise ValueError()
""")
excinfo = pytest.raises(ValueError, mod.f)
excinfo.traceback = excinfo.traceback.filter()
excinfo.traceback[1].set_repr_style("short")
excinfo.traceback[2].set_repr_style("short")
r = excinfo.getrepr(style="long")
tw = TWMock()
r.toterminal(tw)
for line in tw.lines: print (line)
assert tw.lines[0] == ""
assert tw.lines[1] == " def f():"
assert tw.lines[2] == "> g()"
assert tw.lines[3] == ""
assert tw.lines[4].endswith("mod.py:3: ")
assert tw.lines[5] == ("_ ", None)
assert tw.lines[6].endswith("in g")
assert tw.lines[7] == " h()"
assert tw.lines[8].endswith("in h")
assert tw.lines[9] == " i()"
assert tw.lines[10] == ("_ ", None)
assert tw.lines[11] == ""
assert tw.lines[12] == " def i():"
assert tw.lines[13] == "> raise ValueError()"
assert tw.lines[14] == "E ValueError"
assert tw.lines[15] == ""
assert tw.lines[16].endswith("mod.py:9: ValueError")
| mpl-2.0 |
miniconfig/home-assistant | homeassistant/helpers/aiohttp_client.py | 9 | 4716 | """Helper for aiohttp webclient stuff."""
import asyncio
import sys
import aiohttp
from aiohttp.hdrs import USER_AGENT, CONTENT_TYPE
from aiohttp import web
from aiohttp.web_exceptions import HTTPGatewayTimeout
import async_timeout
from homeassistant.core import callback
from homeassistant.const import EVENT_HOMEASSISTANT_CLOSE
from homeassistant.const import __version__
DATA_CONNECTOR = 'aiohttp_connector'
DATA_CONNECTOR_NOTVERIFY = 'aiohttp_connector_notverify'
DATA_CLIENTSESSION = 'aiohttp_clientsession'
DATA_CLIENTSESSION_NOTVERIFY = 'aiohttp_clientsession_notverify'
SERVER_SOFTWARE = 'HomeAssistant/{0} aiohttp/{1} Python/{2[0]}.{2[1]}'.format(
__version__, aiohttp.__version__, sys.version_info)
@callback
def async_get_clientsession(hass, verify_ssl=True):
"""Return default aiohttp ClientSession.
This method must be run in the event loop.
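    A minimal usage sketch (hypothetical component code; the URL is a
    placeholder):
        session = async_get_clientsession(hass)
        resp = yield from session.get('http://example.com')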
"""
if verify_ssl:
key = DATA_CLIENTSESSION
else:
key = DATA_CLIENTSESSION_NOTVERIFY
if key not in hass.data:
connector = _async_get_connector(hass, verify_ssl)
clientsession = aiohttp.ClientSession(
loop=hass.loop,
connector=connector,
headers={USER_AGENT: SERVER_SOFTWARE}
)
_async_register_clientsession_shutdown(hass, clientsession)
hass.data[key] = clientsession
return hass.data[key]
@callback
def async_create_clientsession(hass, verify_ssl=True, auto_cleanup=True,
**kwargs):
"""Create a new ClientSession with kwargs, i.e. for cookies.
If auto_cleanup is False, you need to call detach() after the session
returned is no longer used. Default is True, the session will be
    automatically detached when Home Assistant closes (EVENT_HOMEASSISTANT_CLOSE).
This method must be run in the event loop.
"""
connector = _async_get_connector(hass, verify_ssl)
clientsession = aiohttp.ClientSession(
loop=hass.loop,
connector=connector,
headers={USER_AGENT: SERVER_SOFTWARE},
**kwargs
)
if auto_cleanup:
_async_register_clientsession_shutdown(hass, clientsession)
return clientsession
@asyncio.coroutine
def async_aiohttp_proxy_stream(hass, request, stream_coro, buffer_size=102400,
timeout=10):
"""Stream websession request to aiohttp web response."""
response = None
stream = None
try:
with async_timeout.timeout(timeout, loop=hass.loop):
stream = yield from stream_coro
response = web.StreamResponse()
response.content_type = stream.headers.get(CONTENT_TYPE)
yield from response.prepare(request)
while True:
data = yield from stream.content.read(buffer_size)
if not data:
break
response.write(data)
except asyncio.TimeoutError:
raise HTTPGatewayTimeout()
except (aiohttp.errors.ClientError,
aiohttp.errors.ClientDisconnectedError):
pass
except (asyncio.CancelledError, ConnectionResetError):
response = None
finally:
if stream is not None:
stream.close()
if response is not None:
yield from response.write_eof()
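# Minimal usage sketch for async_aiohttp_proxy_stream (hypothetical camera
# handler code; `url` is a placeholder):
#     websession = async_get_clientsession(hass)
#     yield from async_aiohttp_proxy_stream(hass, request, websession.get(url))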
@callback
# pylint: disable=invalid-name
def _async_register_clientsession_shutdown(hass, clientsession):
"""Register ClientSession close on homeassistant shutdown.
This method must be run in the event loop.
"""
@callback
def _async_close_websession(event):
"""Close websession."""
clientsession.detach()
hass.bus.async_listen_once(
EVENT_HOMEASSISTANT_CLOSE, _async_close_websession)
@callback
def _async_get_connector(hass, verify_ssl=True):
"""Return the connector pool for aiohttp.
This method must be run in the event loop.
"""
is_new = False
if verify_ssl:
if DATA_CONNECTOR not in hass.data:
connector = aiohttp.TCPConnector(loop=hass.loop)
hass.data[DATA_CONNECTOR] = connector
is_new = True
else:
connector = hass.data[DATA_CONNECTOR]
else:
if DATA_CONNECTOR_NOTVERIFY not in hass.data:
connector = aiohttp.TCPConnector(loop=hass.loop, verify_ssl=False)
hass.data[DATA_CONNECTOR_NOTVERIFY] = connector
is_new = True
else:
connector = hass.data[DATA_CONNECTOR_NOTVERIFY]
if is_new:
@asyncio.coroutine
def _async_close_connector(event):
"""Close connector pool."""
yield from connector.close()
hass.bus.async_listen_once(
EVENT_HOMEASSISTANT_CLOSE, _async_close_connector)
return connector
| mit |
robovm/robovm-studio | python/lib/Lib/site-packages/django/contrib/staticfiles/storage.py | 71 | 3364 | import os
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.files.storage import FileSystemStorage
from django.utils.importlib import import_module
from django.contrib.staticfiles import utils
class StaticFilesStorage(FileSystemStorage):
"""
Standard file system storage for site media files.
The defaults for ``location`` and ``base_url`` are
``STATIC_ROOT`` and ``STATIC_URL``.
"""
def __init__(self, location=None, base_url=None, *args, **kwargs):
if location is None:
location = settings.STATIC_ROOT
if base_url is None:
base_url = settings.STATIC_URL
if not location:
raise ImproperlyConfigured("You're using the staticfiles app "
"without having set the STATIC_ROOT setting. Set it to "
"the absolute path of the directory that holds static media.")
# check for None since we might use a root URL (``/``)
if base_url is None:
raise ImproperlyConfigured("You're using the staticfiles app "
"without having set the STATIC_URL setting. Set it to "
"URL that handles the files served from STATIC_ROOT.")
if settings.DEBUG:
utils.check_settings()
super(StaticFilesStorage, self).__init__(location, base_url, *args, **kwargs)
class AppStaticStorage(FileSystemStorage):
"""
A file system storage backend that takes an app module and works
for the ``static`` directory of it.
"""
source_dir = 'static'
def __init__(self, app, *args, **kwargs):
"""
Returns a static file storage if available in the given app.
"""
# app is actually the models module of the app. Remove the '.models'.
bits = app.__name__.split('.')[:-1]
self.app_name = bits[-1]
self.app_module = '.'.join(bits)
# The models module (app) may be a package in which case
# dirname(app.__file__) would be wrong. Import the actual app
# as opposed to the models module.
app = import_module(self.app_module)
location = self.get_location(os.path.dirname(app.__file__))
super(AppStaticStorage, self).__init__(location, *args, **kwargs)
def get_location(self, app_root):
"""
Given the app root, return the location of the static files of an app,
by default 'static'. We special case the admin app here since it has
its static files in 'media'.
"""
if self.app_module == 'django.contrib.admin':
return os.path.join(app_root, 'media')
return os.path.join(app_root, self.source_dir)
def get_prefix(self):
"""
Return the path name that should be prepended to files for this app.
"""
if self.app_module == 'django.contrib.admin':
return self.app_name
return None
def get_files(self, ignore_patterns=[]):
"""
Return a list containing the relative source paths for all files that
should be copied for an app.
"""
files = []
prefix = self.get_prefix()
for path in utils.get_files(self, ignore_patterns):
if prefix:
path = '/'.join([prefix, path])
files.append(path)
return files
| apache-2.0 |
ClearCorp-dev/odoo | addons/mrp_operations/__init__.py | 443 | 1091 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import mrp_operations
import report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
rubyu/anki | aqt/fields.py | 18 | 5690 | # Copyright: Damien Elmes <[email protected]>
# License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
from aqt.qt import *
from anki.consts import *
import aqt
from aqt.utils import showWarning, openHelp, getOnlyText, askUser
class FieldDialog(QDialog):
def __init__(self, mw, note, ord=0, parent=None):
QDialog.__init__(self, parent or mw) #, Qt.Window)
self.mw = aqt.mw
self.parent = parent or mw
self.note = note
self.col = self.mw.col
self.mm = self.mw.col.models
self.model = note.model()
self.mw.checkpoint(_("Fields"))
self.form = aqt.forms.fields.Ui_Dialog()
self.form.setupUi(self)
self.setWindowTitle(_("Fields for %s") % self.model['name'])
self.form.buttonBox.button(QDialogButtonBox.Help).setAutoDefault(False)
self.form.buttonBox.button(QDialogButtonBox.Close).setAutoDefault(False)
self.currentIdx = None
self.oldSortField = self.model['sortf']
self.fillFields()
self.setupSignals()
self.form.fieldList.setCurrentRow(0)
self.exec_()
##########################################################################
def fillFields(self):
self.currentIdx = None
self.form.fieldList.clear()
for f in self.model['flds']:
self.form.fieldList.addItem(f['name'])
def setupSignals(self):
c = self.connect
s = SIGNAL
f = self.form
c(f.fieldList, s("currentRowChanged(int)"), self.onRowChange)
c(f.fieldAdd, s("clicked()"), self.onAdd)
c(f.fieldDelete, s("clicked()"), self.onDelete)
c(f.fieldRename, s("clicked()"), self.onRename)
c(f.fieldPosition, s("clicked()"), self.onPosition)
c(f.sortField, s("clicked()"), self.onSortField)
c(f.buttonBox, s("helpRequested()"), self.onHelp)
def onRowChange(self, idx):
if idx == -1:
return
self.saveField()
self.loadField(idx)
def _uniqueName(self, prompt, ignoreOrd=None, old=""):
txt = getOnlyText(prompt, default=old)
if not txt:
return
for f in self.model['flds']:
if ignoreOrd is not None and f['ord'] == ignoreOrd:
continue
if f['name'] == txt:
showWarning(_("That field name is already used."))
return
return txt
def onRename(self):
idx = self.currentIdx
f = self.model['flds'][idx]
name = self._uniqueName(_("New name:"), self.currentIdx, f['name'])
if not name:
return
self.mm.renameField(self.model, f, name)
self.saveField()
self.fillFields()
self.form.fieldList.setCurrentRow(idx)
def onAdd(self):
name = self._uniqueName(_("Field name:"))
if not name:
return
self.saveField()
self.mw.progress.start()
f = self.mm.newField(name)
self.mm.addField(self.model, f)
self.mw.progress.finish()
self.fillFields()
self.form.fieldList.setCurrentRow(len(self.model['flds'])-1)
def onDelete(self):
if len(self.model['flds']) < 2:
return showWarning(_("Notes require at least one field."))
c = self.mm.useCount(self.model)
c = ngettext("%d note", "%d notes", c) % c
if not askUser(_("Delete field from %s?") % c):
return
f = self.model['flds'][self.form.fieldList.currentRow()]
self.mw.progress.start()
self.mm.remField(self.model, f)
self.mw.progress.finish()
self.fillFields()
self.form.fieldList.setCurrentRow(0)
def onPosition(self, delta=-1):
idx = self.currentIdx
l = len(self.model['flds'])
txt = getOnlyText(_("New position (1...%d):") % l, default=str(idx+1))
if not txt:
return
try:
pos = int(txt)
except ValueError:
return
if not 0 < pos <= l:
return
self.saveField()
f = self.model['flds'][self.currentIdx]
self.mw.progress.start()
self.mm.moveField(self.model, f, pos-1)
self.mw.progress.finish()
self.fillFields()
self.form.fieldList.setCurrentRow(pos-1)
def onSortField(self):
# don't allow user to disable; it makes no sense
self.form.sortField.setChecked(True)
self.model['sortf'] = self.form.fieldList.currentRow()
def loadField(self, idx):
self.currentIdx = idx
fld = self.model['flds'][idx]
f = self.form
f.fontFamily.setCurrentFont(QFont(fld['font']))
f.fontSize.setValue(fld['size'])
f.sticky.setChecked(fld['sticky'])
f.sortField.setChecked(self.model['sortf'] == fld['ord'])
f.rtl.setChecked(fld['rtl'])
def saveField(self):
# not initialized yet?
if self.currentIdx is None:
return
idx = self.currentIdx
fld = self.model['flds'][idx]
f = self.form
fld['font'] = f.fontFamily.currentFont().family()
fld['size'] = f.fontSize.value()
fld['sticky'] = f.sticky.isChecked()
fld['rtl'] = f.rtl.isChecked()
def reject(self):
self.saveField()
if self.oldSortField != self.model['sortf']:
self.mw.progress.start()
self.mw.col.updateFieldCache(self.mm.nids(self.model))
self.mw.progress.finish()
self.mm.save(self.model)
self.mw.reset()
QDialog.reject(self)
def accept(self):
self.reject()
def onHelp(self):
openHelp("fields")
| agpl-3.0 |
chamaelj/tools-artbio | unstable/local_tools/id_to_fasta_in_bowtie.py | 4 | 1821 | #!/usr/bin/python
import sys
# Python script to extract read IDs from the id column and pick up the corresponding sequences in a fasta reference read library.
# Use it when 3' trimming in bowtie has degraded the information of the initial sequence read.
# The script takes a standard bowtie output and creates a dictionary of unique IDs (collapsing multi-matching reads).
# It also takes the fasta reference read library and creates another dictionary for fast matching.
# It finally outputs a fasta file of reads with the original (untrimmed) sequences.
# In addition, filtering can be applied so that only reads ending with the desired last three nucleotides are output.
# Usage:
# id_to_fasta_in_bowtie.py <bowtie tabular output> <fasta reference read library> <fasta with restored sequences> <fasta or bowtie format> <filter string>
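# Example invocation (hypothetical file names):
# id_to_fasta_in_bowtie.py matched.bowtie reads.fasta restored.fasta fasta TCA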
Fbowtie = open(sys.argv[1])
bowtie_dic = {}
for line in Fbowtie:
idbowtie = line.split()[0]
bowtie_dic[idbowtie]= line[:-1]
Fbowtie.close()
Ffasta = open(sys.argv[2])
fasta_dic = {}
for line in Ffasta:
if line[0] == ">":
idfasta = line[1:-1]
else:
fasta_dic[idfasta] = line[:-1]
Ffasta.close()
output = open(sys.argv[3], "w")
# The optional 5th argument restricts output to reads whose last three
# nucleotides match the filter string; without it, all reads are written.
filter_string = sys.argv[5] if len(sys.argv) > 5 else None
if sys.argv[4] == "fasta":
    for id in bowtie_dic:
        if filter_string is None or fasta_dic[id][-3:] == filter_string:
            print >> output, ">%s\n%s" % (id, fasta_dic[id])
else:
    for id in bowtie_dic:
        if filter_string is None or fasta_dic[id][-3:] == filter_string:
            fields = bowtie_dic[id].split()
            fields[3] = fasta_dic[id]
            print >> output, "\t".join(fields)
output.close()
| mit |
fiete201/qutebrowser | qutebrowser/misc/utilcmds.py | 2 | 9615 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2021 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <https://www.gnu.org/licenses/>.
"""Misc. utility commands exposed to the user."""
# QApplication and objects are imported so they're usable in :debug-pyeval
import functools
import os
import traceback
from typing import Optional
from PyQt5.QtCore import QUrl
from PyQt5.QtWidgets import QApplication
from qutebrowser.browser import qutescheme
from qutebrowser.utils import log, objreg, usertypes, message, debug, utils
from qutebrowser.keyinput import modeman
from qutebrowser.commands import runners
from qutebrowser.api import cmdutils
from qutebrowser.misc import ( # pylint: disable=unused-import
consolewidget, debugcachestats, objects, miscwidgets)
from qutebrowser.utils.version import pastebin_version
from qutebrowser.qt import sip
@cmdutils.register(maxsplit=1, no_cmd_split=True, no_replace_variables=True)
@cmdutils.argument('win_id', value=cmdutils.Value.win_id)
def later(duration: str, command: str, win_id: int) -> None:
"""Execute a command after some time.
Args:
duration: Duration to wait in format XhYmZs or a number for milliseconds.
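            For example "10s", "1h30m" or "500" (milliseconds).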
command: The command to run, with optional args.
"""
try:
ms = utils.parse_duration(duration)
except ValueError as e:
raise cmdutils.CommandError(e)
commandrunner = runners.CommandRunner(win_id)
timer = usertypes.Timer(name='later', parent=QApplication.instance())
try:
timer.setSingleShot(True)
try:
timer.setInterval(ms)
except OverflowError:
raise cmdutils.CommandError("Numeric argument is too large for "
"internal int representation.")
timer.timeout.connect(
functools.partial(commandrunner.run_safely, command))
timer.timeout.connect(timer.deleteLater)
timer.start()
except:
timer.deleteLater()
raise
@cmdutils.register(maxsplit=1, no_cmd_split=True, no_replace_variables=True)
@cmdutils.argument('win_id', value=cmdutils.Value.win_id)
@cmdutils.argument('count', value=cmdutils.Value.count)
def repeat(times: int, command: str, win_id: int, count: int = None) -> None:
"""Repeat a given command.
Args:
times: How many times to repeat.
command: The command to run, with optional args.
count: Multiplies with 'times' when given.
"""
if count is not None:
times *= count
if times < 0:
raise cmdutils.CommandError("A negative count doesn't make sense.")
commandrunner = runners.CommandRunner(win_id)
for _ in range(times):
commandrunner.run_safely(command)
@cmdutils.register(maxsplit=1, no_cmd_split=True, no_replace_variables=True)
@cmdutils.argument('win_id', value=cmdutils.Value.win_id)
@cmdutils.argument('count', value=cmdutils.Value.count)
def run_with_count(count_arg: int, command: str, win_id: int,
count: int = 1) -> None:
"""Run a command with the given count.
If run_with_count itself is run with a count, it multiplies count_arg.
Args:
count_arg: The count to pass to the command.
command: The command to run, with optional args.
count: The count that run_with_count itself received.
"""
runners.CommandRunner(win_id).run(command, count_arg * count)
@cmdutils.register()
def clear_messages() -> None:
"""Clear all message notifications."""
message.global_bridge.clear_messages.emit()
@cmdutils.register(debug=True)
def debug_all_objects() -> None:
"""Print a list of all objects to the debug log."""
s = debug.get_all_objects()
log.misc.debug(s)
@cmdutils.register(debug=True)
def debug_cache_stats() -> None:
"""Print LRU cache stats."""
debugcachestats.debug_cache_stats()
@cmdutils.register(debug=True)
def debug_console() -> None:
"""Show the debugging console."""
if consolewidget.console_widget is None:
log.misc.debug('initializing debug console')
consolewidget.init()
assert consolewidget.console_widget is not None
if consolewidget.console_widget.isVisible():
log.misc.debug('hiding debug console')
consolewidget.console_widget.hide()
else:
log.misc.debug('showing debug console')
consolewidget.console_widget.show()
@cmdutils.register(maxsplit=0, debug=True, no_cmd_split=True)
def debug_pyeval(s: str, file: bool = False, quiet: bool = False) -> None:
"""Evaluate a python string and display the results as a web page.
Args:
s: The string to evaluate.
file: Interpret s as a path to file, also implies --quiet.
quiet: Don't show the output in a new tab.
"""
if file:
quiet = True
path = os.path.expanduser(s)
try:
with open(path, 'r', encoding='utf-8') as f:
s = f.read()
except OSError as e:
raise cmdutils.CommandError(str(e))
try:
exec(s)
out = "No error"
except Exception:
out = traceback.format_exc()
else:
try:
r = eval(s)
out = repr(r)
except Exception:
out = traceback.format_exc()
qutescheme.pyeval_output = out
if quiet:
log.misc.debug("pyeval output: {}".format(out))
else:
tabbed_browser = objreg.get('tabbed-browser', scope='window',
window='last-focused')
tabbed_browser.load_url(QUrl('qute://pyeval'), newtab=True)
@cmdutils.register(debug=True)
def debug_set_fake_clipboard(s: str = None) -> None:
"""Put data into the fake clipboard and enable logging, used for tests.
Args:
s: The text to put into the fake clipboard, or unset to enable logging.
"""
if s is None:
utils.log_clipboard = True
else:
utils.fake_clipboard = s
@cmdutils.register()
@cmdutils.argument('win_id', value=cmdutils.Value.win_id)
@cmdutils.argument('count', value=cmdutils.Value.count)
def repeat_command(win_id: int, count: int = None) -> None:
"""Repeat the last executed command.
Args:
count: Which count to pass the command.
"""
mode_manager = modeman.instance(win_id)
if mode_manager.mode not in runners.last_command:
raise cmdutils.CommandError("You didn't do anything yet.")
cmd = runners.last_command[mode_manager.mode]
commandrunner = runners.CommandRunner(win_id)
commandrunner.run(cmd[0], count if count is not None else cmd[1])
@cmdutils.register(debug=True, name='debug-log-capacity')
def log_capacity(capacity: int) -> None:
"""Change the number of log lines to be stored in RAM.
Args:
capacity: Number of lines for the log.
"""
if capacity < 0:
raise cmdutils.CommandError("Can't set a negative log capacity!")
assert log.ram_handler is not None
log.ram_handler.change_log_capacity(capacity)
@cmdutils.register(debug=True)
def debug_log_filter(filters: str) -> None:
"""Change the log filter for console logging.
Args:
filters: A comma separated list of logger names. Can also be "none" to
clear any existing filters.
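            For example "commands,completion".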
"""
if log.console_filter is None:
raise cmdutils.CommandError("No log.console_filter. Not attached "
"to a console?")
try:
new_filter = log.LogFilter.parse(filters)
except log.InvalidLogFilterError as e:
raise cmdutils.CommandError(e)
log.console_filter.update_from(new_filter)
@cmdutils.register()
@cmdutils.argument('current_win_id', value=cmdutils.Value.win_id)
def window_only(current_win_id: int) -> None:
"""Close all windows except for the current one."""
for win_id, window in objreg.window_registry.items():
# We could be in the middle of destroying a window here
if sip.isdeleted(window):
continue
if win_id != current_win_id:
window.close()
@cmdutils.register()
@cmdutils.argument('win_id', value=cmdutils.Value.win_id)
def version(win_id: int, paste: bool = False) -> None:
"""Show version information.
Args:
paste: Paste to pastebin.
"""
tabbed_browser = objreg.get('tabbed-browser', scope='window',
window=win_id)
tabbed_browser.load_url(QUrl('qute://version/'), newtab=True)
if paste:
pastebin_version()
_keytester_widget: Optional[miscwidgets.KeyTesterWidget] = None
@cmdutils.register(debug=True)
def debug_keytester() -> None:
"""Show a keytester widget."""
global _keytester_widget
if (_keytester_widget and
not sip.isdeleted(_keytester_widget) and
_keytester_widget.isVisible()):
_keytester_widget.close()
else:
_keytester_widget = miscwidgets.KeyTesterWidget()
_keytester_widget.show()
| gpl-3.0 |
YinongLong/scikit-learn | examples/preprocessing/plot_function_transformer.py | 158 | 1993 | """
=========================================================
Using FunctionTransformer to select columns
=========================================================
Shows how to use a function transformer in a pipeline. If you know your
dataset's first principal component is irrelevant for a classification task,
you can use the FunctionTransformer to select all but the first column of the
PCA transformed data.
"""
import matplotlib.pyplot as plt
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.decomposition import PCA
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import FunctionTransformer
def _generate_vector(shift=0.5, noise=15):
return np.arange(1000) + (np.random.rand(1000) - shift) * noise
def generate_dataset():
"""
This dataset is two lines with a slope ~ 1, where one has
a y offset of ~100
"""
return np.vstack((
np.vstack((
_generate_vector(),
_generate_vector() + 100,
)).T,
np.vstack((
_generate_vector(),
_generate_vector(),
)).T,
)), np.hstack((np.zeros(1000), np.ones(1000)))
def all_but_first_column(X):
return X[:, 1:]
def drop_first_component(X, y):
"""
Create a pipeline with PCA and the column selector and use it to
transform the dataset.
"""
pipeline = make_pipeline(
PCA(), FunctionTransformer(all_but_first_column),
)
X_train, X_test, y_train, y_test = train_test_split(X, y)
pipeline.fit(X_train, y_train)
return pipeline.transform(X_test), y_test
if __name__ == '__main__':
X, y = generate_dataset()
lw = 0
plt.figure()
plt.scatter(X[:, 0], X[:, 1], c=y, lw=lw)
plt.figure()
X_transformed, y_transformed = drop_first_component(*generate_dataset())
plt.scatter(
X_transformed[:, 0],
np.zeros(len(X_transformed)),
c=y_transformed,
lw=lw,
s=60
)
plt.show()
| bsd-3-clause |
aureooms/networkx | networkx/algorithms/bipartite/centrality.py | 4 | 8129 | #-*- coding: utf-8 -*-
# Copyright (C) 2011 by
# Jordi Torrents <[email protected]>
# Aric Hagberg <[email protected]>
# All rights reserved.
# BSD license.
import networkx as nx
__author__ = """\n""".join(['Jordi Torrents <[email protected]>',
'Aric Hagberg ([email protected])'])
__all__=['degree_centrality',
'betweenness_centrality',
'closeness_centrality']
def degree_centrality(G, nodes):
r"""Compute the degree centrality for nodes in a bipartite network.
The degree centrality for a node `v` is the fraction of nodes
connected to it.
Parameters
----------
G : graph
A bipartite network
nodes : list or container
Container with all nodes in one bipartite node set.
Returns
-------
centrality : dictionary
Dictionary keyed by node with bipartite degree centrality as the value.
See Also
--------
betweenness_centrality,
closeness_centrality,
sets,
is_bipartite
Notes
-----
    The nodes input parameter must contain all nodes in one bipartite node set,
but the dictionary returned contains all nodes from both bipartite node
sets.
For unipartite networks, the degree centrality values are
normalized by dividing by the maximum possible degree (which is
`n-1` where `n` is the number of nodes in G).
In the bipartite case, the maximum possible degree of a node in a
bipartite node set is the number of nodes in the opposite node set
[1]_. The degree centrality for a node `v` in the bipartite
sets `U` with `n` nodes and `V` with `m` nodes is
.. math::
        d_{v} = \frac{deg(v)}{m}, \mbox{for } v \in U,
        d_{v} = \frac{deg(v)}{n}, \mbox{for } v \in V,
where `deg(v)` is the degree of node `v`.
References
----------
.. [1] Borgatti, S.P. and Halgin, D. In press. "Analyzing Affiliation
Networks". In Carrington, P. and Scott, J. (eds) The Sage Handbook
of Social Network Analysis. Sage Publications.
http://www.steveborgatti.com/papers/bhaffiliations.pdf
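    Examples
    --------
    An illustrative example on a small graph; ``nx.path_graph(4)`` is
    bipartite, with node sets ``{0, 2}`` and ``{1, 3}``:
    >>> import networkx as nx
    >>> from networkx.algorithms import bipartite
    >>> G = nx.path_graph(4)
    >>> centrality = bipartite.degree_centrality(G, [0, 2])
    >>> centrality[0]
    0.5
    >>> centrality[1]
    1.0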
"""
top = set(nodes)
bottom = set(G) - top
s = 1.0/len(bottom)
centrality = dict((n,d*s) for n,d in G.degree(top))
s = 1.0/len(top)
centrality.update(dict((n,d*s) for n,d in G.degree(bottom)))
return centrality
def betweenness_centrality(G, nodes):
r"""Compute betweenness centrality for nodes in a bipartite network.
Betweenness centrality of a node `v` is the sum of the
fraction of all-pairs shortest paths that pass through `v`.
Values of betweenness are normalized by the maximum possible
value which for bipartite graphs is limited by the relative size
of the two node sets [1]_.
Let `n` be the number of nodes in the node set `U` and
`m` be the number of nodes in the node set `V`, then
nodes in `U` are normalized by dividing by
.. math::
\frac{1}{2} [m^2 (s + 1)^2 + m (s + 1)(2t - s - 1) - t (2s - t + 3)] ,
where
.. math::
s = (n - 1) \div m , t = (n - 1) \mod m ,
and nodes in `V` are normalized by dividing by
.. math::
\frac{1}{2} [n^2 (p + 1)^2 + n (p + 1)(2r - p - 1) - r (2p - r + 3)] ,
where,
.. math::
p = (m - 1) \div n , r = (m - 1) \mod n .
Parameters
----------
G : graph
A bipartite graph
nodes : list or container
Container with all nodes in one bipartite node set.
Returns
-------
betweenness : dictionary
Dictionary keyed by node with bipartite betweenness centrality
as the value.
See Also
--------
degree_centrality,
closeness_centrality,
sets,
is_bipartite
Notes
-----
The nodes input parameter must contain all nodes in one bipartite node set,
but the dictionary returned contains all nodes from both node sets.
References
----------
.. [1] Borgatti, S.P. and Halgin, D. In press. "Analyzing Affiliation
Networks". In Carrington, P. and Scott, J. (eds) The Sage Handbook
of Social Network Analysis. Sage Publications.
http://www.steveborgatti.com/papers/bhaffiliations.pdf
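    Examples
    --------
    An illustrative example on a small graph; ``nx.path_graph(4)`` is
    bipartite, with node sets ``{0, 2}`` and ``{1, 3}``, and node 1 lies
    on every shortest path between the remaining nodes:
    >>> import networkx as nx
    >>> from networkx.algorithms import bipartite
    >>> G = nx.path_graph(4)
    >>> betweenness = bipartite.betweenness_centrality(G, [0, 2])
    >>> betweenness[0]
    0.0
    >>> betweenness[1]
    1.0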
"""
top = set(nodes)
bottom = set(G) - top
n = float(len(top))
m = float(len(bottom))
s = (n-1) // m
t = (n-1) % m
bet_max_top = (((m**2)*((s+1)**2))+
(m*(s+1)*(2*t-s-1))-
(t*((2*s)-t+3)))/2.0
p = (m-1) // n
r = (m-1) % n
bet_max_bot = (((n**2)*((p+1)**2))+
(n*(p+1)*(2*r-p-1))-
(r*((2*p)-r+3)))/2.0
betweenness = nx.betweenness_centrality(G, normalized=False,
weight=None)
for node in top:
betweenness[node]/=bet_max_top
for node in bottom:
betweenness[node]/=bet_max_bot
return betweenness
def closeness_centrality(G, nodes, normalized=True):
r"""Compute the closeness centrality for nodes in a bipartite network.
The closeness of a node is the distance to all other nodes in the
graph or in the case that the graph is not connected to all other nodes
in the connected component containing that node.
Parameters
----------
G : graph
A bipartite network
nodes : list or container
Container with all nodes in one bipartite node set.
normalized : bool, optional
If True (default) normalize by connected component size.
Returns
-------
closeness : dictionary
Dictionary keyed by node with bipartite closeness centrality
as the value.
See Also
--------
betweenness_centrality,
degree_centrality
sets,
is_bipartite
Notes
-----
    The nodes input parameter must contain all nodes in one bipartite node set,
but the dictionary returned contains all nodes from both node sets.
Closeness centrality is normalized by the minimum distance possible.
In the bipartite case the minimum distance for a node in one bipartite
node set is 1 from all nodes in the other node set and 2 from all
other nodes in its own set [1]_. Thus the closeness centrality
for node `v` in the two bipartite sets `U` with
`n` nodes and `V` with `m` nodes is
.. math::
        c_{v} = \frac{m + 2(n - 1)}{d}, \mbox{for } v \in U,
        c_{v} = \frac{n + 2(m - 1)}{d}, \mbox{for } v \in V,
where `d` is the sum of the distances from `v` to all
other nodes.
Higher values of closeness indicate higher centrality.
As in the unipartite case, setting normalized=True causes the
values to normalized further to n-1 / size(G)-1 where n is the
number of nodes in the connected part of graph containing the
node. If the graph is not completely connected, this algorithm
computes the closeness centrality for each connected part
separately.
References
----------
.. [1] Borgatti, S.P. and Halgin, D. In press. "Analyzing Affiliation
Networks". In Carrington, P. and Scott, J. (eds) The Sage Handbook
of Social Network Analysis. Sage Publications.
http://www.steveborgatti.com/papers/bhaffiliations.pdf
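    Examples
    --------
    An illustrative example on a small graph; ``nx.path_graph(4)`` is
    bipartite, with node sets ``{0, 2}`` and ``{1, 3}``:
    >>> import networkx as nx
    >>> from networkx.algorithms import bipartite
    >>> G = nx.path_graph(4)
    >>> closeness = bipartite.closeness_centrality(G, [0, 2])
    >>> closeness[1]
    1.0
    >>> round(closeness[0], 2)
    0.67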
"""
closeness={}
path_length=nx.single_source_shortest_path_length
top = set(nodes)
bottom = set(G) - top
n = float(len(top))
m = float(len(bottom))
for node in top:
sp=path_length(G,node)
totsp=sum(sp.values())
if totsp > 0.0 and len(G) > 1:
closeness[node]= (m + 2*(n-1)) / totsp
if normalized:
s=(len(sp)-1.0) / ( len(G) - 1 )
closeness[node] *= s
else:
            closeness[node] = 0.0
for node in bottom:
sp=path_length(G,node)
totsp=sum(sp.values())
if totsp > 0.0 and len(G) > 1:
closeness[node]= (n + 2*(m-1)) / totsp
if normalized:
s=(len(sp)-1.0) / ( len(G) - 1 )
closeness[node] *= s
else:
            closeness[node] = 0.0
return closeness
| bsd-3-clause |
kmoocdev2/edx-platform | cms/djangoapps/contentstore/views/tests/test_entrance_exam.py | 22 | 13731 | """
Test module for Entrance Exams AJAX callback handler workflows
"""
import json
from django.conf import settings
from django.contrib.auth.models import User
from django.test.client import RequestFactory
from milestones.tests.utils import MilestonesTestCaseMixin
from mock import patch
from opaque_keys.edx.keys import UsageKey
from contentstore.tests.utils import AjaxEnabledTestClient, CourseTestCase
from contentstore.utils import reverse_url
from contentstore.views.entrance_exam import (
add_entrance_exam_milestone,
create_entrance_exam,
delete_entrance_exam,
remove_entrance_exam_milestone_reference,
update_entrance_exam
)
from contentstore.views.helpers import GRADER_TYPES, create_xblock
from models.settings.course_grading import CourseGradingModel
from models.settings.course_metadata import CourseMetadata
from student.tests.factories import UserFactory
from util import milestones_helpers
from xmodule.modulestore.django import modulestore
@patch.dict(settings.FEATURES, {'ENTRANCE_EXAMS': True})
class EntranceExamHandlerTests(CourseTestCase, MilestonesTestCaseMixin):
"""
Base test class for create, save, and delete
"""
def setUp(self):
"""
Shared scaffolding for individual test runs
"""
super(EntranceExamHandlerTests, self).setUp()
self.course_key = self.course.id
self.usage_key = self.course.location
self.course_url = '/course/{}'.format(unicode(self.course.id))
self.exam_url = '/course/{}/entrance_exam/'.format(unicode(self.course.id))
self.milestone_relationship_types = milestones_helpers.get_milestone_relationship_types()
def test_entrance_exam_milestone_addition(self):
"""
Unit Test: test addition of entrance exam milestone content
"""
parent_locator = unicode(self.course.location)
created_block = create_xblock(
parent_locator=parent_locator,
user=self.user,
category='chapter',
display_name=('Entrance Exam'),
is_entrance_exam=True
)
add_entrance_exam_milestone(self.course.id, created_block)
content_milestones = milestones_helpers.get_course_content_milestones(
unicode(self.course.id),
unicode(created_block.location),
self.milestone_relationship_types['FULFILLS']
)
self.assertTrue(len(content_milestones))
self.assertEqual(len(milestones_helpers.get_course_milestones(self.course.id)), 1)
def test_entrance_exam_milestone_removal(self):
"""
Unit Test: test removal of entrance exam milestone content
"""
parent_locator = unicode(self.course.location)
created_block = create_xblock(
parent_locator=parent_locator,
user=self.user,
category='chapter',
display_name=('Entrance Exam'),
is_entrance_exam=True
)
add_entrance_exam_milestone(self.course.id, created_block)
content_milestones = milestones_helpers.get_course_content_milestones(
unicode(self.course.id),
unicode(created_block.location),
self.milestone_relationship_types['FULFILLS']
)
self.assertEqual(len(content_milestones), 1)
user = UserFactory()
request = RequestFactory().request()
request.user = user
remove_entrance_exam_milestone_reference(request, self.course.id)
content_milestones = milestones_helpers.get_course_content_milestones(
unicode(self.course.id),
unicode(created_block.location),
self.milestone_relationship_types['FULFILLS']
)
self.assertEqual(len(content_milestones), 0)
def test_contentstore_views_entrance_exam_post(self):
"""
Unit Test: test_contentstore_views_entrance_exam_post
"""
resp = self.client.post(self.exam_url, {}, http_accept='application/json')
self.assertEqual(resp.status_code, 201)
resp = self.client.get(self.exam_url)
self.assertEqual(resp.status_code, 200)
# Reload the test course now that the exam module has been added
self.course = modulestore().get_course(self.course.id)
metadata = CourseMetadata.fetch_all(self.course)
self.assertTrue(metadata['entrance_exam_enabled'])
self.assertIsNotNone(metadata['entrance_exam_minimum_score_pct'])
self.assertIsNotNone(metadata['entrance_exam_id']['value'])
self.assertTrue(len(milestones_helpers.get_course_milestones(unicode(self.course.id))))
content_milestones = milestones_helpers.get_course_content_milestones(
unicode(self.course.id),
metadata['entrance_exam_id']['value'],
self.milestone_relationship_types['FULFILLS']
)
self.assertTrue(len(content_milestones))
def test_contentstore_views_entrance_exam_post_new_sequential_confirm_grader(self):
"""
        Unit Test: test_contentstore_views_entrance_exam_post_new_sequential_confirm_grader
"""
resp = self.client.post(self.exam_url, {}, http_accept='application/json')
self.assertEqual(resp.status_code, 201)
resp = self.client.get(self.exam_url)
self.assertEqual(resp.status_code, 200)
# Reload the test course now that the exam module has been added
self.course = modulestore().get_course(self.course.id)
# Add a new child sequential to the exam module
# Confirm that the grader type is 'Entrance Exam'
chapter_locator_string = json.loads(resp.content).get('locator')
# chapter_locator = UsageKey.from_string(chapter_locator_string)
seq_data = {
'category': "sequential",
'display_name': "Entrance Exam Subsection",
'parent_locator': chapter_locator_string,
}
resp = self.client.ajax_post(reverse_url('xblock_handler'), seq_data)
seq_locator_string = json.loads(resp.content).get('locator')
seq_locator = UsageKey.from_string(seq_locator_string)
section_grader_type = CourseGradingModel.get_section_grader_type(seq_locator)
self.assertEqual(GRADER_TYPES['ENTRANCE_EXAM'], section_grader_type['graderType'])
def test_contentstore_views_entrance_exam_get(self):
"""
Unit Test: test_contentstore_views_entrance_exam_get
"""
resp = self.client.post(
self.exam_url,
{'entrance_exam_minimum_score_pct': settings.ENTRANCE_EXAM_MIN_SCORE_PCT},
http_accept='application/json'
)
self.assertEqual(resp.status_code, 201)
resp = self.client.get(self.exam_url)
self.assertEqual(resp.status_code, 200)
def test_contentstore_views_entrance_exam_delete(self):
"""
Unit Test: test_contentstore_views_entrance_exam_delete
"""
resp = self.client.post(self.exam_url, {}, http_accept='application/json')
self.assertEqual(resp.status_code, 201)
resp = self.client.get(self.exam_url)
self.assertEqual(resp.status_code, 200)
resp = self.client.delete(self.exam_url)
self.assertEqual(resp.status_code, 204)
resp = self.client.get(self.exam_url)
self.assertEqual(resp.status_code, 404)
user = User.objects.create(
username='test_user',
email='[email protected]',
is_active=True,
)
user.set_password('test')
user.save()
milestones = milestones_helpers.get_course_milestones(unicode(self.course_key))
self.assertEqual(len(milestones), 1)
milestone_key = '{}.{}'.format(milestones[0]['namespace'], milestones[0]['name'])
paths = milestones_helpers.get_course_milestones_fulfillment_paths(
unicode(self.course_key),
milestones_helpers.serialize_user(user)
)
# What we have now is a course milestone requirement and no valid fulfillment
# paths for the specified user. The LMS is going to have to ignore this situation,
        # because we can't confidently prevent it from occurring at some point in the future.
self.assertEqual(len(paths[milestone_key]), 0)
# Re-adding an entrance exam to the course should fix the missing link
# It wipes out any old entrance exam artifacts and inserts a new exam course chapter/module
resp = self.client.post(self.exam_url, {}, http_accept='application/json')
self.assertEqual(resp.status_code, 201)
resp = self.client.get(self.exam_url)
self.assertEqual(resp.status_code, 200)
# Confirm that we have only one Entrance Exam grader after re-adding the exam (validates SOL-475)
graders = CourseGradingModel.fetch(self.course_key).graders
count = 0
for grader in graders:
if grader['type'] == GRADER_TYPES['ENTRANCE_EXAM']:
count += 1
self.assertEqual(count, 1)
def test_contentstore_views_entrance_exam_delete_bogus_course(self):
"""
Unit Test: test_contentstore_views_entrance_exam_delete_bogus_course
"""
resp = self.client.delete('/course/bad/course/key/entrance_exam')
self.assertEqual(resp.status_code, 400)
def test_contentstore_views_entrance_exam_get_bogus_course(self):
"""
Unit Test: test_contentstore_views_entrance_exam_get_bogus_course
"""
resp = self.client.get('/course/bad/course/key/entrance_exam')
self.assertEqual(resp.status_code, 400)
def test_contentstore_views_entrance_exam_get_bogus_exam(self):
"""
Unit Test: test_contentstore_views_entrance_exam_get_bogus_exam
"""
resp = self.client.post(
self.exam_url,
{'entrance_exam_minimum_score_pct': '50'},
http_accept='application/json'
)
self.assertEqual(resp.status_code, 201)
resp = self.client.get(self.exam_url)
self.assertEqual(resp.status_code, 200)
self.course = modulestore().get_course(self.course.id)
# Should raise an ItemNotFoundError and return a 404
updated_metadata = {'entrance_exam_id': 'i4x://org.4/course_4/chapter/ed7c4c6a4d68409998e2c8554c4629d1'}
CourseMetadata.update_from_dict(
updated_metadata,
self.course,
self.user,
)
self.course = modulestore().get_course(self.course.id)
resp = self.client.get(self.exam_url)
self.assertEqual(resp.status_code, 404)
# Should raise an InvalidKeyError and return a 404
updated_metadata = {'entrance_exam_id': '123afsdfsad90f87'}
CourseMetadata.update_from_dict(
updated_metadata,
self.course,
self.user,
)
self.course = modulestore().get_course(self.course.id)
resp = self.client.get(self.exam_url)
self.assertEqual(resp.status_code, 404)
def test_contentstore_views_entrance_exam_post_bogus_course(self):
"""
Unit Test: test_contentstore_views_entrance_exam_post_bogus_course
"""
resp = self.client.post(
'/course/bad/course/key/entrance_exam',
{},
http_accept='application/json'
)
self.assertEqual(resp.status_code, 400)
def test_contentstore_views_entrance_exam_post_invalid_http_accept(self):
"""
Unit Test: test_contentstore_views_entrance_exam_post_invalid_http_accept
"""
resp = self.client.post(
'/course/bad/course/key/entrance_exam',
{},
http_accept='text/html'
)
self.assertEqual(resp.status_code, 400)
def test_contentstore_views_entrance_exam_get_invalid_user(self):
"""
Unit Test: test_contentstore_views_entrance_exam_get_invalid_user
"""
user = User.objects.create(
username='test_user',
email='[email protected]',
is_active=True,
)
user.set_password('test')
user.save()
self.client = AjaxEnabledTestClient()
self.client.login(username='test_user', password='test')
resp = self.client.get(self.exam_url)
self.assertEqual(resp.status_code, 403)
def test_contentstore_views_entrance_exam_unsupported_method(self):
"""
Unit Test: test_contentstore_views_entrance_exam_unsupported_method
"""
resp = self.client.put(self.exam_url)
self.assertEqual(resp.status_code, 405)
def test_entrance_exam_view_direct_missing_score_setting(self):
"""
Unit Test: test_entrance_exam_view_direct_missing_score_setting
"""
user = UserFactory()
user.is_staff = True
request = RequestFactory()
request.user = user
resp = create_entrance_exam(request, self.course.id, None)
self.assertEqual(resp.status_code, 201)
@patch.dict('django.conf.settings.FEATURES', {'ENTRANCE_EXAMS': False})
def test_entrance_exam_feature_flag_gating(self):
user = UserFactory()
user.is_staff = True
request = RequestFactory()
request.user = user
resp = self.client.get(self.exam_url)
self.assertEqual(resp.status_code, 400)
resp = create_entrance_exam(request, self.course.id, None)
self.assertEqual(resp.status_code, 400)
resp = delete_entrance_exam(request, self.course.id)
self.assertEqual(resp.status_code, 400)
# No return, so we'll just ensure no exception is thrown
update_entrance_exam(request, self.course.id, {})
| agpl-3.0 |
SlicerRt/SlicerDebuggingTools | PyDevRemoteDebug/ptvsd-4.1.3/ptvsd/_vendored/pydevd/_pydevd_bundle/pydevd_console.py | 1 | 9547 | '''A helper file for the pydev debugger (REPL) console
'''
import sys
import traceback
from code import InteractiveConsole
from _pydev_bundle import _pydev_completer
from _pydev_bundle.pydev_console_utils import BaseInterpreterInterface, BaseStdIn
from _pydev_bundle.pydev_imports import Exec
from _pydev_bundle.pydev_override import overrides
from _pydevd_bundle import pydevd_save_locals
from _pydevd_bundle.pydevd_io import IOBuf
from pydevd_tracing import get_exception_traceback_str
from _pydevd_bundle.pydevd_xml import make_valid_xml_value
CONSOLE_OUTPUT = "output"
CONSOLE_ERROR = "error"
#=======================================================================================================================
# ConsoleMessage
#=======================================================================================================================
class ConsoleMessage:
"""Console Messages
"""
def __init__(self):
self.more = False
# List of tuple [('error', 'error_message'), ('message_list', 'output_message')]
self.console_messages = []
def add_console_message(self, message_type, message):
"""add messages in the console_messages list
"""
for m in message.split("\n"):
if m.strip():
self.console_messages.append((message_type, m))
def update_more(self, more):
"""more is set to true if further input is required from the user
else more is set to false
"""
self.more = more
def to_xml(self):
"""Create an XML for console message_list, error and more (true/false)
<xml>
<message_list>console message_list</message_list>
<error>console error</error>
<more>true/false</more>
</xml>
"""
makeValid = make_valid_xml_value
xml = '<xml><more>%s</more>' % (self.more)
for message_type, message in self.console_messages:
xml += '<%s message="%s"></%s>' % (message_type, makeValid(message), message_type)
xml += '</xml>'
return xml
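# A minimal sketch of the ConsoleMessage flow above: messages are split on
# newlines and serialized to the XML shape documented in to_xml(). The
# expected output noted below is illustrative.
def _example_console_message():
    msg = ConsoleMessage()
    msg.add_console_message(CONSOLE_OUTPUT, "hello\nworld")
    msg.update_more(False)
    print(msg.to_xml())
    # <xml><more>False</more><output message="hello"></output>
    # <output message="world"></output></xml>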
#=======================================================================================================================
# DebugConsoleStdIn
#=======================================================================================================================
class DebugConsoleStdIn(BaseStdIn):
@overrides(BaseStdIn.readline)
def readline(self, *args, **kwargs):
sys.stderr.write('Warning: Reading from stdin is still not supported in this console.\n')
return '\n'
#=======================================================================================================================
# DebugConsole
#=======================================================================================================================
class DebugConsole(InteractiveConsole, BaseInterpreterInterface):
"""Wrapper around code.InteractiveConsole, in order to send
errors and outputs to the debug console
"""
@overrides(BaseInterpreterInterface.create_std_in)
def create_std_in(self, *args, **kwargs):
try:
if not self.__buffer_output:
return sys.stdin
except:
pass
return DebugConsoleStdIn() #If buffered, raw_input is not supported in this console.
@overrides(InteractiveConsole.push)
def push(self, line, frame, buffer_output=True):
"""Change built-in stdout and stderr methods by the
new custom StdMessage.
execute the InteractiveConsole.push.
Change the stdout and stderr back be the original built-ins
:param buffer_output: if False won't redirect the output.
Return boolean (True if more input is required else False),
output_messages and input_messages
"""
self.__buffer_output = buffer_output
more = False
if buffer_output:
original_stdout = sys.stdout
original_stderr = sys.stderr
try:
try:
self.frame = frame
if buffer_output:
out = sys.stdout = IOBuf()
err = sys.stderr = IOBuf()
more = self.add_exec(line)
except Exception:
exc = get_exception_traceback_str()
if buffer_output:
err.buflist.append("Internal Error: %s" % (exc,))
else:
sys.stderr.write("Internal Error: %s\n" % (exc,))
finally:
#Remove frame references.
self.frame = None
frame = None
if buffer_output:
sys.stdout = original_stdout
sys.stderr = original_stderr
if buffer_output:
return more, out.buflist, err.buflist
else:
return more, [], []
@overrides(BaseInterpreterInterface.do_add_exec)
def do_add_exec(self, line):
return InteractiveConsole.push(self, line)
@overrides(InteractiveConsole.runcode)
def runcode(self, code):
"""Execute a code object.
When an exception occurs, self.showtraceback() is called to
display a traceback. All exceptions are caught except
SystemExit, which is reraised.
A note about KeyboardInterrupt: this exception may occur
elsewhere in this code, and may not always be caught. The
caller should be prepared to deal with it.
"""
try:
Exec(code, self.frame.f_globals, self.frame.f_locals)
pydevd_save_locals.save_locals(self.frame)
except SystemExit:
raise
except:
# In case sys.excepthook called, use original excepthook #PyDev-877: Debug console freezes with Python 3.5+
# (showtraceback does it on python 3.5 onwards)
sys.excepthook = sys.__excepthook__
try:
self.showtraceback()
finally:
sys.__excepthook__ = sys.excepthook
def get_namespace(self):
dbg_namespace = {}
dbg_namespace.update(self.frame.f_globals)
dbg_namespace.update(self.frame.f_locals) # locals later because it has precedence over the actual globals
return dbg_namespace
#=======================================================================================================================
# InteractiveConsoleCache
#=======================================================================================================================
class InteractiveConsoleCache:
thread_id = None
frame_id = None
interactive_console_instance = None
#Note: On Jython 2.1 we can't use classmethod or staticmethod, so, just make the functions below free-functions.
def get_interactive_console(thread_id, frame_id, frame, console_message):
"""returns the global interactive console.
interactive console should have been initialized by this time
:rtype: DebugConsole
"""
if InteractiveConsoleCache.thread_id == thread_id and InteractiveConsoleCache.frame_id == frame_id:
return InteractiveConsoleCache.interactive_console_instance
InteractiveConsoleCache.interactive_console_instance = DebugConsole()
InteractiveConsoleCache.thread_id = thread_id
InteractiveConsoleCache.frame_id = frame_id
console_stacktrace = traceback.extract_stack(frame, limit=1)
if console_stacktrace:
current_context = console_stacktrace[0] # top entry from stacktrace
context_message = 'File "%s", line %s, in %s' % (current_context[0], current_context[1], current_context[2])
console_message.add_console_message(CONSOLE_OUTPUT, "[Current context]: %s" % (context_message,))
return InteractiveConsoleCache.interactive_console_instance
def clear_interactive_console():
InteractiveConsoleCache.thread_id = None
InteractiveConsoleCache.frame_id = None
InteractiveConsoleCache.interactive_console_instance = None
def execute_console_command(frame, thread_id, frame_id, line, buffer_output=True):
"""fetch an interactive console instance from the cache and
push the received command to the console.
create and return an instance of console_message
"""
console_message = ConsoleMessage()
interpreter = get_interactive_console(thread_id, frame_id, frame, console_message)
more, output_messages, error_messages = interpreter.push(line, frame, buffer_output)
console_message.update_more(more)
for message in output_messages:
console_message.add_console_message(CONSOLE_OUTPUT, message)
for message in error_messages:
console_message.add_console_message(CONSOLE_ERROR, message)
return console_message
def get_description(frame, thread_id, frame_id, expression):
console_message = ConsoleMessage()
interpreter = get_interactive_console(thread_id, frame_id, frame, console_message)
try:
interpreter.frame = frame
return interpreter.getDescription(expression)
finally:
interpreter.frame = None
def get_completions(frame, act_tok):
""" fetch all completions, create xml for the same
return the completions xml
"""
return _pydev_completer.generate_completions_as_xml(frame, act_tok)
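# A minimal sketch of driving the console helpers above from inside a
# debugged process; the thread_id and frame_id values are placeholder cache
# keys, and sys._getframe() stands in for a real debugger-provided frame.
def _example_execute_console_command():
    import sys
    frame = sys._getframe()
    msg = execute_console_command(frame, 'thread_1', 'frame_1', '1 + 1')
    print(msg.to_xml())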
| bsd-3-clause |
robbiet480/home-assistant | homeassistant/components/min_max/sensor.py | 3 | 6925 | """Support for displaying the minimal and the maximal value."""
import logging
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
ATTR_UNIT_OF_MEASUREMENT,
CONF_NAME,
CONF_TYPE,
STATE_UNAVAILABLE,
STATE_UNKNOWN,
)
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import async_track_state_change
_LOGGER = logging.getLogger(__name__)
ATTR_MIN_VALUE = "min_value"
ATTR_MIN_ENTITY_ID = "min_entity_id"
ATTR_MAX_VALUE = "max_value"
ATTR_MAX_ENTITY_ID = "max_entity_id"
ATTR_COUNT_SENSORS = "count_sensors"
ATTR_MEAN = "mean"
ATTR_LAST = "last"
ATTR_LAST_ENTITY_ID = "last_entity_id"
ATTR_TO_PROPERTY = [
ATTR_COUNT_SENSORS,
ATTR_MAX_VALUE,
ATTR_MAX_ENTITY_ID,
ATTR_MEAN,
ATTR_MIN_VALUE,
ATTR_MIN_ENTITY_ID,
ATTR_LAST,
ATTR_LAST_ENTITY_ID,
]
CONF_ENTITY_IDS = "entity_ids"
CONF_ROUND_DIGITS = "round_digits"
ICON = "mdi:calculator"
SENSOR_TYPES = {
ATTR_MIN_VALUE: "min",
ATTR_MAX_VALUE: "max",
ATTR_MEAN: "mean",
ATTR_LAST: "last",
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_TYPE, default=SENSOR_TYPES[ATTR_MAX_VALUE]): vol.All(
cv.string, vol.In(SENSOR_TYPES.values())
),
vol.Optional(CONF_NAME): cv.string,
vol.Required(CONF_ENTITY_IDS): cv.entity_ids,
vol.Optional(CONF_ROUND_DIGITS, default=2): vol.Coerce(int),
}
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the min/max/mean sensor."""
entity_ids = config.get(CONF_ENTITY_IDS)
name = config.get(CONF_NAME)
sensor_type = config.get(CONF_TYPE)
round_digits = config.get(CONF_ROUND_DIGITS)
async_add_entities(
[MinMaxSensor(hass, entity_ids, name, sensor_type, round_digits)], True
)
return True
def calc_min(sensor_values):
"""Calculate min value, honoring unknown states."""
val = None
entity_id = None
for sensor_id, sensor_value in sensor_values:
if sensor_value != STATE_UNKNOWN:
if val is None or val > sensor_value:
entity_id, val = sensor_id, sensor_value
return entity_id, val
def calc_max(sensor_values):
"""Calculate max value, honoring unknown states."""
val = None
entity_id = None
for sensor_id, sensor_value in sensor_values:
if sensor_value != STATE_UNKNOWN:
if val is None or val < sensor_value:
entity_id, val = sensor_id, sensor_value
return entity_id, val
def calc_mean(sensor_values, round_digits):
"""Calculate mean value, honoring unknown states."""
sensor_value_sum = 0
count = 0
for _, sensor_value in sensor_values:
if sensor_value != STATE_UNKNOWN:
sensor_value_sum += sensor_value
count += 1
if count == 0:
return None
return round(sensor_value_sum / count, round_digits)
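# A minimal sketch of the three aggregation helpers above on hand-built
# (entity_id, value) pairs; STATE_UNKNOWN entries are skipped, mirroring
# the loops above.
def _example_min_max_helpers():
    values = [("sensor.a", 20.0), ("sensor.b", STATE_UNKNOWN), ("sensor.c", 15.5)]
    print(calc_min(values))      # ('sensor.c', 15.5)
    print(calc_max(values))      # ('sensor.a', 20.0)
    print(calc_mean(values, 2))  # 17.75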
class MinMaxSensor(Entity):
"""Representation of a min/max sensor."""
def __init__(self, hass, entity_ids, name, sensor_type, round_digits):
"""Initialize the min/max sensor."""
self._hass = hass
self._entity_ids = entity_ids
self._sensor_type = sensor_type
self._round_digits = round_digits
if name:
self._name = name
else:
self._name = f"{next(v for k, v in SENSOR_TYPES.items() if self._sensor_type == v)} sensor".capitalize()
self._unit_of_measurement = None
self._unit_of_measurement_mismatch = False
self.min_value = self.max_value = self.mean = self.last = None
self.min_entity_id = self.max_entity_id = self.last_entity_id = None
self.count_sensors = len(self._entity_ids)
self.states = {}
@callback
def async_min_max_sensor_state_listener(entity, old_state, new_state):
"""Handle the sensor state changes."""
if new_state.state is None or new_state.state in [
STATE_UNKNOWN,
STATE_UNAVAILABLE,
]:
self.states[entity] = STATE_UNKNOWN
hass.async_add_job(self.async_update_ha_state, True)
return
if self._unit_of_measurement is None:
self._unit_of_measurement = new_state.attributes.get(
ATTR_UNIT_OF_MEASUREMENT
)
if self._unit_of_measurement != new_state.attributes.get(
ATTR_UNIT_OF_MEASUREMENT
):
_LOGGER.warning(
"Units of measurement do not match for entity %s", self.entity_id
)
self._unit_of_measurement_mismatch = True
try:
self.states[entity] = float(new_state.state)
self.last = float(new_state.state)
self.last_entity_id = entity
except ValueError:
_LOGGER.warning(
"Unable to store state. Only numerical states are supported"
)
hass.async_add_job(self.async_update_ha_state, True)
async_track_state_change(hass, entity_ids, async_min_max_sensor_state_listener)
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
if self._unit_of_measurement_mismatch:
return None
return getattr(
self, next(k for k, v in SENSOR_TYPES.items() if self._sensor_type == v)
)
@property
def unit_of_measurement(self):
"""Return the unit the value is expressed in."""
if self._unit_of_measurement_mismatch:
return "ERR"
return self._unit_of_measurement
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def device_state_attributes(self):
"""Return the state attributes of the sensor."""
state_attr = {
attr: getattr(self, attr)
for attr in ATTR_TO_PROPERTY
if getattr(self, attr) is not None
}
return state_attr
@property
def icon(self):
"""Return the icon to use in the frontend, if any."""
return ICON
async def async_update(self):
"""Get the latest data and updates the states."""
sensor_values = [
(entity_id, self.states[entity_id])
for entity_id in self._entity_ids
if entity_id in self.states
]
self.min_entity_id, self.min_value = calc_min(sensor_values)
self.max_entity_id, self.max_value = calc_max(sensor_values)
self.mean = calc_mean(sensor_values, self._round_digits)
| apache-2.0 |
mcedit/mcedit | drawable.py | 1 | 1360 | """
Drawable: a small base class that caches OpenGL drawing commands in a
display list and replays them until invalidated.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
log = logging.getLogger(__name__)
from OpenGL import GL
class Drawable(object):
def __init__(self):
super(Drawable, self).__init__()
self._displayList = None
self.invalidList = True
self.children = []
def setUp(self):
"""
Set up rendering settings and view matrices
:return:
:rtype:
"""
def tearDown(self):
"""
Return any settings changed in setUp to their previous states
:return:
:rtype:
"""
def drawSelf(self):
"""
Draw this drawable, if it has its own graphics.
:return:
:rtype:
"""
def _draw(self):
self.setUp()
self.drawSelf()
for child in self.children:
child.draw()
self.tearDown()
def draw(self):
if self._displayList is None:
self._displayList = GL.glGenLists(1)
if self.invalidList:
self.compileList()
GL.glCallList(self._displayList)
def compileList(self):
GL.glNewList(self._displayList, GL.GL_COMPILE)
self._draw()
GL.glEndList()
self.invalidList = False
def invalidate(self):
self.invalidList = True
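# A minimal subclass sketch of the pattern above: draw() compiles drawSelf()
# into a display list once and replays it until invalidate() is called.
# Requires an active OpenGL context; the triangle geometry is illustrative.
class _ExampleTriangle(Drawable):
    def drawSelf(self):
        GL.glBegin(GL.GL_TRIANGLES)
        GL.glVertex2f(0.0, 0.0)
        GL.glVertex2f(1.0, 0.0)
        GL.glVertex2f(0.0, 1.0)
        GL.glEnd()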
| isc |
a4a881d4/lovecoin | contrib/spendfrom/spendfrom.py | 792 | 10053 | #!/usr/bin/env python
#
# Use the raw transactions API to spend bitcoins received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
# spendfrom.py # Lists available funds
# spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to a bitcoind or Bitcoin-Qt running
# on localhost.
#
# Depends on jsonrpc
#
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
BASE_FEE=Decimal("0.001")
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
"""Return the default location of the bitcoin data directory"""
if platform.system() == "Darwin":
return os.path.expanduser("~/Library/Application Support/Bitcoin/")
elif platform.system() == "Windows":
return os.path.join(os.environ['APPDATA'], "Bitcoin")
return os.path.expanduser("~/.bitcoin")
def read_bitcoin_config(dbdir):
"""Read the bitcoin.conf file from dbdir, returns dictionary of settings"""
from ConfigParser import SafeConfigParser
class FakeSecHead(object):
def __init__(self, fp):
self.fp = fp
self.sechead = '[all]\n'
def readline(self):
if self.sechead:
try: return self.sechead
finally: self.sechead = None
else:
s = self.fp.readline()
if s.find('#') != -1:
s = s[0:s.find('#')].strip() +"\n"
return s
config_parser = SafeConfigParser()
config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "bitcoin.conf"))))
return dict(config_parser.items("all"))
def connect_JSON(config):
"""Connect to a bitcoin JSON-RPC server"""
testnet = config.get('testnet', '0')
testnet = (int(testnet) > 0) # 0/1 in config file, convert to True/False
if not 'rpcport' in config:
config['rpcport'] = 19332 if testnet else 9332
connect = "http://%s:%[email protected]:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
try:
result = ServiceProxy(connect)
# ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
# but also make sure the bitcoind we're talking to is/isn't testnet:
if result.getmininginfo()['testnet'] != testnet:
sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
sys.exit(1)
return result
except:
sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
sys.exit(1)
def unlock_wallet(bitcoind):
info = bitcoind.getinfo()
if 'unlocked_until' not in info:
return True # wallet is not encrypted
t = int(info['unlocked_until'])
if t <= time.time():
try:
passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
bitcoind.walletpassphrase(passphrase, 5)
except:
sys.stderr.write("Wrong passphrase\n")
info = bitcoind.getinfo()
return int(info['unlocked_until']) > time.time()
def list_available(bitcoind):
address_summary = dict()
address_to_account = dict()
for info in bitcoind.listreceivedbyaddress(0):
address_to_account[info["address"]] = info["account"]
unspent = bitcoind.listunspent(0)
for output in unspent:
# listunspent doesn't give addresses, so:
rawtx = bitcoind.getrawtransaction(output['txid'], 1)
vout = rawtx["vout"][output['vout']]
pk = vout["scriptPubKey"]
# This code only deals with ordinary pay-to-bitcoin-address
# or pay-to-script-hash outputs right now; anything exotic is ignored.
if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
continue
address = pk["addresses"][0]
if address in address_summary:
address_summary[address]["total"] += vout["value"]
address_summary[address]["outputs"].append(output)
else:
address_summary[address] = {
"total" : vout["value"],
"outputs" : [output],
"account" : address_to_account.get(address, "")
}
return address_summary
def select_coins(needed, inputs):
# Feel free to improve this, this is good enough for my simple needs:
outputs = []
have = Decimal("0.0")
n = 0
while have < needed and n < len(inputs):
outputs.append({ "txid":inputs[n]["txid"], "vout":inputs[n]["vout"]})
have += inputs[n]["amount"]
n += 1
return (outputs, have-needed)
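# A minimal sketch of select_coins on hand-built unspent outputs; the txids
# are placeholders and amounts are Decimals, as returned by listunspent.
def _example_select_coins():
    inputs = [{"txid": "a" * 64, "vout": 0, "amount": Decimal("0.5")},
              {"txid": "b" * 64, "vout": 1, "amount": Decimal("0.7")}]
    outputs, change = select_coins(Decimal("0.9"), inputs)
    print(outputs)  # both outputs selected, in order
    print(change)   # Decimal('0.3') left over for change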
def create_tx(bitcoind, fromaddresses, toaddress, amount, fee):
all_coins = list_available(bitcoind)
total_available = Decimal("0.0")
needed = amount+fee
potential_inputs = []
for addr in fromaddresses:
if addr not in all_coins:
continue
potential_inputs.extend(all_coins[addr]["outputs"])
total_available += all_coins[addr]["total"]
if total_available < needed:
sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed));
sys.exit(1)
#
# Note:
# Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
# Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
# Decimals, I'm casting amounts to float before sending them to bitcoind.
#
outputs = { toaddress : float(amount) }
(inputs, change_amount) = select_coins(needed, potential_inputs)
if change_amount > BASE_FEE: # don't bother with zero or tiny change
change_address = fromaddresses[-1]
if change_address in outputs:
outputs[change_address] += float(change_amount)
else:
outputs[change_address] = float(change_amount)
rawtx = bitcoind.createrawtransaction(inputs, outputs)
signed_rawtx = bitcoind.signrawtransaction(rawtx)
if not signed_rawtx["complete"]:
sys.stderr.write("signrawtransaction failed\n")
sys.exit(1)
txdata = signed_rawtx["hex"]
return txdata
def compute_amount_in(bitcoind, txinfo):
result = Decimal("0.0")
for vin in txinfo['vin']:
in_info = bitcoind.getrawtransaction(vin['txid'], 1)
vout = in_info['vout'][vin['vout']]
result = result + vout['value']
return result
def compute_amount_out(txinfo):
result = Decimal("0.0")
for vout in txinfo['vout']:
result = result + vout['value']
return result
def sanity_test_fee(bitcoind, txdata_hex, max_fee):
class FeeError(RuntimeError):
pass
try:
txinfo = bitcoind.decoderawtransaction(txdata_hex)
total_in = compute_amount_in(bitcoind, txinfo)
        total_out = compute_amount_out(txinfo)
        fee = total_in - total_out  # whatever is not spent on outputs is the fee
        if fee > max_fee:
            raise FeeError("Rejecting transaction, unreasonable fee of "+str(fee))
        tx_size = len(txdata_hex)/2
        kb = tx_size/1000  # integer division rounds down
        if kb > 1 and fee < BASE_FEE:
            raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
        if total_in < 0.01 and fee < BASE_FEE:
            raise FeeError("Rejecting no-fee, tiny-amount transaction")
# Exercise for the reader: compute transaction priority, and
# warn if this is a very-low-priority transaction
except FeeError as err:
sys.stderr.write((str(err)+"\n"))
sys.exit(1)
def main():
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--from", dest="fromaddresses", default=None,
help="addresses to get bitcoins from")
parser.add_option("--to", dest="to", default=None,
help="address to get send bitcoins to")
parser.add_option("--amount", dest="amount", default=None,
help="amount to send")
parser.add_option("--fee", dest="fee", default="0.0",
help="fee to include")
parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
help="location of bitcoin.conf file with RPC username/password (default: %default)")
parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
help="Use the test network")
parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
help="Don't broadcast the transaction, just create and print the transaction data")
(options, args) = parser.parse_args()
check_json_precision()
config = read_bitcoin_config(options.datadir)
if options.testnet: config['testnet'] = True
bitcoind = connect_JSON(config)
if options.amount is None:
address_summary = list_available(bitcoind)
for address,info in address_summary.iteritems():
n_transactions = len(info['outputs'])
if n_transactions > 1:
print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
else:
print("%s %.8f %s"%(address, info['total'], info['account']))
else:
fee = Decimal(options.fee)
amount = Decimal(options.amount)
while unlock_wallet(bitcoind) == False:
pass # Keep asking for passphrase until they get it right
txdata = create_tx(bitcoind, options.fromaddresses.split(","), options.to, amount, fee)
sanity_test_fee(bitcoind, txdata, amount*Decimal("0.01"))
if options.dry_run:
print(txdata)
else:
txid = bitcoind.sendrawtransaction(txdata)
print(txid)
if __name__ == '__main__':
main()
| mit |
moutai/scikit-learn | sklearn/utils/linear_assignment_.py | 67 | 9524 | """
Solve the unique lowest-cost assignment problem using the
Hungarian algorithm (also known as Munkres algorithm).
"""
# Based on original code by Brian Clapper, adapted to NumPy by Gael Varoquaux.
# Heavily refactored by Lars Buitinck.
#
# TODO: a version of this algorithm has been incorporated in SciPy; use that
# when SciPy 0.17 is released.
# Copyright (c) 2008 Brian M. Clapper <[email protected]>, Gael Varoquaux
# Author: Brian M. Clapper, Gael Varoquaux
# LICENSE: BSD
import numpy as np
from .fixes import astype
def linear_assignment(X):
"""Solve the linear assignment problem using the Hungarian algorithm.
The problem is also known as maximum weight matching in bipartite graphs.
The method is also known as the Munkres or Kuhn-Munkres algorithm.
Parameters
----------
X : array
The cost matrix of the bipartite graph
Returns
-------
indices : array,
The pairs of (row, col) indices in the original array giving
the original ordering.
References
----------
1. http://www.public.iastate.edu/~ddoty/HungarianAlgorithm.html
2. Harold W. Kuhn. The Hungarian Method for the assignment problem.
*Naval Research Logistics Quarterly*, 2:83-97, 1955.
3. Harold W. Kuhn. Variants of the Hungarian method for assignment
problems. *Naval Research Logistics Quarterly*, 3: 253-258, 1956.
4. Munkres, J. Algorithms for the Assignment and Transportation Problems.
*Journal of the Society of Industrial and Applied Mathematics*,
5(1):32-38, March, 1957.
5. https://en.wikipedia.org/wiki/Hungarian_algorithm
"""
indices = _hungarian(X).tolist()
indices.sort()
# Re-force dtype to ints in case of empty list
indices = np.array(indices, dtype=int)
# Make sure the array is 2D with 2 columns.
# This is needed when dealing with an empty list
indices.shape = (-1, 2)
return indices
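# A minimal usage sketch: solve a 3x3 assignment problem. For this cost
# matrix the optimal pairing is rows 0, 1, 2 -> columns 1, 0, 2 (cost 5).
def _example_linear_assignment():
    cost = np.array([[4, 1, 3],
                     [2, 0, 5],
                     [3, 2, 2]])
    print(linear_assignment(cost))  # [[0 1] [1 0] [2 2]]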
class _HungarianState(object):
"""State of one execution of the Hungarian algorithm.
Parameters
----------
cost_matrix : 2D matrix
The cost matrix. Does not have to be square.
"""
def __init__(self, cost_matrix):
cost_matrix = np.atleast_2d(cost_matrix)
# If there are more rows (n) than columns (m), then the algorithm
# will not be able to work correctly. Therefore, we
# transpose the cost function when needed. Just have to
# remember to swap the result columns back later.
transposed = (cost_matrix.shape[1] < cost_matrix.shape[0])
if transposed:
self.C = (cost_matrix.T).copy()
else:
self.C = cost_matrix.copy()
self.transposed = transposed
# At this point, m >= n.
n, m = self.C.shape
self.row_uncovered = np.ones(n, dtype=np.bool)
self.col_uncovered = np.ones(m, dtype=np.bool)
self.Z0_r = 0
self.Z0_c = 0
self.path = np.zeros((n + m, 2), dtype=int)
self.marked = np.zeros((n, m), dtype=int)
def _find_prime_in_row(self, row):
"""
Find the first prime element in the specified row. Returns
        the column index, or -1 if no primed element was found.
"""
col = np.argmax(self.marked[row] == 2)
if self.marked[row, col] != 2:
col = -1
return col
def _clear_covers(self):
"""Clear all covered matrix cells"""
self.row_uncovered[:] = True
self.col_uncovered[:] = True
def _hungarian(cost_matrix):
"""The Hungarian algorithm.
Calculate the Munkres solution to the classical assignment problem and
return the indices for the lowest-cost pairings.
Parameters
----------
cost_matrix : 2D matrix
The cost matrix. Does not have to be square.
Returns
-------
indices : 2D array of indices
The pairs of (row, col) indices in the original array giving
the original ordering.
"""
state = _HungarianState(cost_matrix)
# No need to bother with assignments if one of the dimensions
# of the cost matrix is zero-length.
step = None if 0 in cost_matrix.shape else _step1
while step is not None:
step = step(state)
# Look for the starred columns
results = np.array(np.where(state.marked == 1)).T
# We need to swap the columns because we originally
# did a transpose on the input cost matrix.
if state.transposed:
results = results[:, ::-1]
return results
# Individual steps of the algorithm follow, as a state machine: they return
# the next step to be taken (function to be called), if any.
def _step1(state):
"""Steps 1 and 2 in the Wikipedia page."""
# Step1: For each row of the matrix, find the smallest element and
# subtract it from every element in its row.
state.C -= state.C.min(axis=1)[:, np.newaxis]
# Step2: Find a zero (Z) in the resulting matrix. If there is no
# starred zero in its row or column, star Z. Repeat for each element
# in the matrix.
for i, j in zip(*np.where(state.C == 0)):
if state.col_uncovered[j] and state.row_uncovered[i]:
state.marked[i, j] = 1
state.col_uncovered[j] = False
state.row_uncovered[i] = False
state._clear_covers()
return _step3
def _step3(state):
"""
Cover each column containing a starred zero. If n columns are covered,
the starred zeros describe a complete set of unique assignments.
In this case, Go to DONE, otherwise, Go to Step 4.
"""
marked = (state.marked == 1)
state.col_uncovered[np.any(marked, axis=0)] = False
if marked.sum() < state.C.shape[0]:
return _step4
def _step4(state):
"""
Find a noncovered zero and prime it. If there is no starred zero
in the row containing this primed zero, Go to Step 5. Otherwise,
cover this row and uncover the column containing the starred
zero. Continue in this manner until there are no uncovered zeros
left. Save the smallest uncovered value and Go to Step 6.
"""
# We convert to int as numpy operations are faster on int
C = (state.C == 0).astype(np.int)
covered_C = C * state.row_uncovered[:, np.newaxis]
covered_C *= astype(state.col_uncovered, dtype=np.int, copy=False)
n = state.C.shape[0]
m = state.C.shape[1]
while True:
# Find an uncovered zero
row, col = np.unravel_index(np.argmax(covered_C), (n, m))
if covered_C[row, col] == 0:
return _step6
else:
state.marked[row, col] = 2
# Find the first starred element in the row
star_col = np.argmax(state.marked[row] == 1)
if not state.marked[row, star_col] == 1:
# Could not find one
state.Z0_r = row
state.Z0_c = col
return _step5
else:
col = star_col
state.row_uncovered[row] = False
state.col_uncovered[col] = True
covered_C[:, col] = C[:, col] * (
astype(state.row_uncovered, dtype=np.int, copy=False))
covered_C[row] = 0
def _step5(state):
"""
Construct a series of alternating primed and starred zeros as follows.
Let Z0 represent the uncovered primed zero found in Step 4.
Let Z1 denote the starred zero in the column of Z0 (if any).
Let Z2 denote the primed zero in the row of Z1 (there will always be one).
Continue until the series terminates at a primed zero that has no starred
zero in its column. Unstar each starred zero of the series, star each
primed zero of the series, erase all primes and uncover every line in the
matrix. Return to Step 3
"""
count = 0
path = state.path
path[count, 0] = state.Z0_r
path[count, 1] = state.Z0_c
while True:
# Find the first starred element in the col defined by
# the path.
row = np.argmax(state.marked[:, path[count, 1]] == 1)
if not state.marked[row, path[count, 1]] == 1:
# Could not find one
break
else:
count += 1
path[count, 0] = row
path[count, 1] = path[count - 1, 1]
# Find the first prime element in the row defined by the
# first path step
col = np.argmax(state.marked[path[count, 0]] == 2)
if state.marked[row, col] != 2:
col = -1
count += 1
path[count, 0] = path[count - 1, 0]
path[count, 1] = col
# Convert paths
for i in range(count + 1):
if state.marked[path[i, 0], path[i, 1]] == 1:
state.marked[path[i, 0], path[i, 1]] = 0
else:
state.marked[path[i, 0], path[i, 1]] = 1
state._clear_covers()
# Erase all prime markings
state.marked[state.marked == 2] = 0
return _step3
def _step6(state):
"""
Add the value found in Step 4 to every element of each covered row,
and subtract it from every element of each uncovered column.
Return to Step 4 without altering any stars, primes, or covered lines.
"""
# the smallest uncovered value in the matrix
if np.any(state.row_uncovered) and np.any(state.col_uncovered):
minval = np.min(state.C[state.row_uncovered], axis=0)
minval = np.min(minval[state.col_uncovered])
state.C[np.logical_not(state.row_uncovered)] += minval
state.C[:, state.col_uncovered] -= minval
return _step4
| bsd-3-clause |
tesidroni/mp | Lib/ntpath.py | 81 | 18082 | # Module 'ntpath' -- common operations on WinNT/Win95 pathnames
"""Common pathname manipulations, WindowsNT/95 version.
Instead of importing this module directly, import os and refer to this
module as os.path.
"""
import os
import sys
import stat
import genericpath
import warnings
from genericpath import *
__all__ = ["normcase","isabs","join","splitdrive","split","splitext",
"basename","dirname","commonprefix","getsize","getmtime",
"getatime","getctime", "islink","exists","lexists","isdir","isfile",
"ismount","walk","expanduser","expandvars","normpath","abspath",
"splitunc","curdir","pardir","sep","pathsep","defpath","altsep",
"extsep","devnull","realpath","supports_unicode_filenames","relpath"]
# strings representing various path-related bits and pieces
curdir = '.'
pardir = '..'
extsep = '.'
sep = '\\'
pathsep = ';'
altsep = '/'
defpath = '.;C:\\bin'
if 'ce' in sys.builtin_module_names:
defpath = '\\Windows'
elif 'os2' in sys.builtin_module_names:
# OS/2 w/ VACPP
altsep = '/'
devnull = 'nul'
# Normalize the case of a pathname and map slashes to backslashes.
# Other normalizations (such as optimizing '../' away) are not done
# (this is done by normpath).
def normcase(s):
"""Normalize case of pathname.
Makes all characters lowercase and all slashes into backslashes."""
return s.replace("/", "\\").lower()
# Return whether a path is absolute.
# Trivial in Posix, harder on the Mac or MS-DOS.
# For DOS it is absolute if it starts with a slash or backslash (current
# volume), or if a pathname after the volume letter and colon / UNC resource
# starts with a slash or backslash.
def isabs(s):
"""Test whether a path is absolute"""
s = splitdrive(s)[1]
return s != '' and s[:1] in '/\\'
# Join two (or more) paths.
def join(a, *p):
"""Join two or more pathname components, inserting "\\" as needed.
If any component is an absolute path, all previous path components
will be discarded."""
path = a
for b in p:
b_wins = 0 # set to 1 iff b makes path irrelevant
if path == "":
b_wins = 1
elif isabs(b):
# This probably wipes out path so far. However, it's more
# complicated if path begins with a drive letter:
# 1. join('c:', '/a') == 'c:/a'
# 2. join('c:/', '/a') == 'c:/a'
# But
# 3. join('c:/a', '/b') == '/b'
# 4. join('c:', 'd:/') = 'd:/'
# 5. join('c:/', 'd:/') = 'd:/'
if path[1:2] != ":" or b[1:2] == ":":
# Path doesn't start with a drive letter, or cases 4 and 5.
b_wins = 1
# Else path has a drive letter, and b doesn't but is absolute.
elif len(path) > 3 or (len(path) == 3 and
path[-1] not in "/\\"):
# case 3
b_wins = 1
if b_wins:
path = b
else:
# Join, and ensure there's a separator.
assert len(path) > 0
if path[-1] in "/\\":
if b and b[0] in "/\\":
path += b[1:]
else:
path += b
elif path[-1] == ":":
path += b
elif b:
if b[0] in "/\\":
path += b
else:
path += "\\" + b
else:
# path is not empty and does not end with a backslash,
# but b is empty; since, e.g., split('a/') produces
# ('a', ''), it's best if join() adds a backslash in
# this case.
path += '\\'
return path
# Split a path in a drive specification (a drive letter followed by a
# colon) and the path specification.
# It is always true that drivespec + pathspec == p
def splitdrive(p):
"""Split a pathname into drive and path specifiers. Returns a 2-tuple
"(drive,path)"; either part may be empty"""
if p[1:2] == ':':
return p[0:2], p[2:]
return '', p
# Parse UNC paths
def splitunc(p):
"""Split a pathname into UNC mount point and relative path specifiers.
Return a 2-tuple (unc, rest); either part may be empty.
If unc is not empty, it has the form '//host/mount' (or similar
using backslashes). unc+rest is always the input path.
Paths containing drive letters never have an UNC part.
"""
if p[1:2] == ':':
return '', p # Drive letter present
firstTwo = p[0:2]
if firstTwo == '//' or firstTwo == '\\\\':
# is a UNC path:
# vvvvvvvvvvvvvvvvvvvv equivalent to drive letter
# \\machine\mountpoint\directories...
# directory ^^^^^^^^^^^^^^^
normp = normcase(p)
index = normp.find('\\', 2)
if index == -1:
##raise RuntimeError, 'illegal UNC path: "' + p + '"'
return ("", p)
index = normp.find('\\', index + 1)
if index == -1:
index = len(p)
return p[:index], p[index:]
return '', p
# Split a path in head (everything up to the last '/') and tail (the
# rest). After the trailing '/' is stripped, the invariant
# join(head, tail) == p holds.
# The resulting head won't end in '/' unless it is the root.
def split(p):
"""Split a pathname.
Return tuple (head, tail) where tail is everything after the final slash.
Either part may be empty."""
d, p = splitdrive(p)
# set i to index beyond p's last slash
i = len(p)
while i and p[i-1] not in '/\\':
i = i - 1
head, tail = p[:i], p[i:] # now tail has no slashes
# remove trailing slashes from head, unless it's all slashes
head2 = head
while head2 and head2[-1] in '/\\':
head2 = head2[:-1]
head = head2 or head
return d + head, tail
# Split a path in root and extension.
# The extension is everything starting at the last dot in the last
# pathname component; the root is everything before that.
# It is always true that root + ext == p.
def splitext(p):
return genericpath._splitext(p, sep, altsep, extsep)
splitext.__doc__ = genericpath._splitext.__doc__
# Return the tail (basename) part of a path.
def basename(p):
"""Returns the final component of a pathname"""
return split(p)[1]
# Return the head (dirname) part of a path.
def dirname(p):
"""Returns the directory component of a pathname"""
return split(p)[0]
# Is a path a symbolic link?
# This will always return false on systems where posix.lstat doesn't exist.
def islink(path):
"""Test for symbolic link.
On WindowsNT/95 and OS/2 always returns false
"""
return False
# alias exists to lexists
lexists = exists
# Is a path a mount point? Either a root (with or without drive letter)
# or an UNC path with at most a / or \ after the mount point.
def ismount(path):
"""Test whether a path is a mount point (defined as root of drive)"""
unc, rest = splitunc(path)
if unc:
return rest in ("", "/", "\\")
p = splitdrive(path)[1]
return len(p) == 1 and p[0] in '/\\'
# Directory tree walk.
# For each directory under top (including top itself, but excluding
# '.' and '..'), func(arg, dirname, filenames) is called, where
# dirname is the name of the directory and filenames is the list
# of files (and subdirectories etc.) in the directory.
# The func may modify the filenames list, to implement a filter,
# or to impose a different order of visiting.
def walk(top, func, arg):
"""Directory tree walk with callback function.
For each directory in the directory tree rooted at top (including top
itself, but excluding '.' and '..'), call func(arg, dirname, fnames).
dirname is the name of the directory, and fnames a list of the names of
the files and subdirectories in dirname (excluding '.' and '..'). func
may modify the fnames list in-place (e.g. via del or slice assignment),
and walk will only recurse into the subdirectories whose names remain in
fnames; this can be used to implement a filter, or to impose a specific
order of visiting. No semantics are defined for, or required of, arg,
beyond that arg is always passed to func. It can be used, e.g., to pass
a filename pattern, or a mutable object designed to accumulate
statistics. Passing None for arg is common."""
warnings.warnpy3k("In 3.x, os.path.walk is removed in favor of os.walk.",
stacklevel=2)
try:
names = os.listdir(top)
except os.error:
return
func(arg, top, names)
for name in names:
name = join(top, name)
if isdir(name):
walk(name, func, arg)
# Expand paths beginning with '~' or '~user'.
# '~' means $HOME; '~user' means that user's home directory.
# If the path doesn't begin with '~', or if the user or $HOME is unknown,
# the path is returned unchanged (leaving error reporting to whatever
# function is called with the expanded path as argument).
# See also module 'glob' for expansion of *, ? and [...] in pathnames.
# (A function should also be defined to do full *sh-style environment
# variable expansion.)
def expanduser(path):
"""Expand ~ and ~user constructs.
If user or $HOME is unknown, do nothing."""
if path[:1] != '~':
return path
i, n = 1, len(path)
while i < n and path[i] not in '/\\':
i = i + 1
if 'HOME' in os.environ:
userhome = os.environ['HOME']
elif 'USERPROFILE' in os.environ:
userhome = os.environ['USERPROFILE']
elif not 'HOMEPATH' in os.environ:
return path
else:
try:
drive = os.environ['HOMEDRIVE']
except KeyError:
drive = ''
userhome = join(drive, os.environ['HOMEPATH'])
if i != 1: #~user
userhome = join(dirname(userhome), path[1:i])
return userhome + path[i:]
# Expand paths containing shell variable substitutions.
# The following rules apply:
# - no expansion within single quotes
# - '$$' is translated into '$'
# - '%%' is translated into '%' if '%%' are not seen in %var1%%var2%
# - ${varname} is accepted.
# - $varname is accepted.
# - %varname% is accepted.
# - varnames can be made out of letters, digits and the characters '_-'
# (though is not verified in the ${varname} and %varname% cases)
# XXX With COMMAND.COM you can use any characters in a variable name,
# XXX except '^|<>='.
def expandvars(path):
"""Expand shell variables of the forms $var, ${var} and %var%.
Unknown variables are left unchanged."""
if '$' not in path and '%' not in path:
return path
import string
varchars = string.ascii_letters + string.digits + '_-'
res = ''
index = 0
pathlen = len(path)
while index < pathlen:
c = path[index]
if c == '\'': # no expansion within single quotes
path = path[index + 1:]
pathlen = len(path)
try:
index = path.index('\'')
res = res + '\'' + path[:index + 1]
except ValueError:
res = res + path
index = pathlen - 1
elif c == '%': # variable or '%'
if path[index + 1:index + 2] == '%':
res = res + c
index = index + 1
else:
path = path[index+1:]
pathlen = len(path)
try:
index = path.index('%')
except ValueError:
res = res + '%' + path
index = pathlen - 1
else:
var = path[:index]
if var in os.environ:
res = res + os.environ[var]
else:
res = res + '%' + var + '%'
elif c == '$': # variable or '$$'
if path[index + 1:index + 2] == '$':
res = res + c
index = index + 1
elif path[index + 1:index + 2] == '{':
path = path[index+2:]
pathlen = len(path)
try:
index = path.index('}')
var = path[:index]
if var in os.environ:
res = res + os.environ[var]
else:
res = res + '${' + var + '}'
except ValueError:
res = res + '${' + path
index = pathlen - 1
else:
var = ''
index = index + 1
c = path[index:index + 1]
while c != '' and c in varchars:
var = var + c
index = index + 1
c = path[index:index + 1]
if var in os.environ:
res = res + os.environ[var]
else:
res = res + '$' + var
if c != '':
index = index - 1
else:
res = res + c
index = index + 1
return res
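# A minimal sketch of the expansion rules listed above; EXAMPLE_VAR is a
# placeholder set only for this demonstration.
def _example_expandvars():
    os.environ['EXAMPLE_VAR'] = 'value'
    print(expandvars('%EXAMPLE_VAR%'))   # value
    print(expandvars('${EXAMPLE_VAR}'))  # value
    print(expandvars('$$'))              # $
    print(expandvars('%missing%'))       # %missing% (unknown, left unchanged)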
# Normalize a path, e.g. A//B, A/./B and A/foo/../B all become A\B.
# Previously, this function also truncated pathnames to 8+3 format,
# but as this module is called "ntpath", that's obviously wrong!
def normpath(path):
"""Normalize path, eliminating double slashes, etc."""
# Preserve unicode (if path is unicode)
backslash, dot = (u'\\', u'.') if isinstance(path, unicode) else ('\\', '.')
if path.startswith(('\\\\.\\', '\\\\?\\')):
# in the case of paths with these prefixes:
# \\.\ -> device names
# \\?\ -> literal paths
# do not do any normalization, but return the path unchanged
return path
path = path.replace("/", "\\")
prefix, path = splitdrive(path)
# We need to be careful here. If the prefix is empty, and the path starts
# with a backslash, it could either be an absolute path on the current
# drive (\dir1\dir2\file) or a UNC filename (\\server\mount\dir1\file). It
# is therefore imperative NOT to collapse multiple backslashes blindly in
# that case.
# The code below preserves multiple backslashes when there is no drive
# letter. This means that the invalid filename \\\a\b is preserved
# unchanged, where a\\\b is normalised to a\b. It's not clear that there
# is any better behaviour for such edge cases.
if prefix == '':
# No drive letter - preserve initial backslashes
while path[:1] == "\\":
prefix = prefix + backslash
path = path[1:]
else:
# We have a drive letter - collapse initial backslashes
if path.startswith("\\"):
prefix = prefix + backslash
path = path.lstrip("\\")
comps = path.split("\\")
i = 0
while i < len(comps):
if comps[i] in ('.', ''):
del comps[i]
elif comps[i] == '..':
if i > 0 and comps[i-1] != '..':
del comps[i-1:i+1]
i -= 1
elif i == 0 and prefix.endswith("\\"):
del comps[i]
else:
i += 1
else:
i += 1
# If the path is now empty, substitute '.'
if not prefix and not comps:
comps.append(dot)
return prefix + backslash.join(comps)
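# A minimal sketch of normpath on representative inputs; expected results
# are noted inline and follow the rules described above.
def _example_normpath():
    print(normpath('C:/a//b/./c/../d'))  # C:\a\b\d
    print(normpath(r'\\server\share'))   # \\server\share (UNC root kept)
    print(normpath(''))                  # .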
# Return an absolute path.
try:
from nt import _getfullpathname
except ImportError: # not running on Windows - mock up something sensible
def abspath(path):
"""Return the absolute version of a path."""
if not isabs(path):
if isinstance(path, unicode):
cwd = os.getcwdu()
else:
cwd = os.getcwd()
path = join(cwd, path)
return normpath(path)
else: # use native Windows method on Windows
def abspath(path):
"""Return the absolute version of a path."""
if path: # Empty path must return current working directory.
try:
path = _getfullpathname(path)
except WindowsError:
pass # Bad path - return unchanged.
elif isinstance(path, unicode):
path = os.getcwdu()
else:
path = os.getcwd()
return normpath(path)
# realpath is a no-op on systems without islink support
realpath = abspath
# Win9x family and earlier have no Unicode filename support.
supports_unicode_filenames = (hasattr(sys, "getwindowsversion") and
sys.getwindowsversion()[3] >= 2)
def _abspath_split(path):
abs = abspath(normpath(path))
prefix, rest = splitunc(abs)
is_unc = bool(prefix)
if not is_unc:
prefix, rest = splitdrive(abs)
return is_unc, prefix, [x for x in rest.split(sep) if x]
def relpath(path, start=curdir):
"""Return a relative version of a path"""
if not path:
raise ValueError("no path specified")
start_is_unc, start_prefix, start_list = _abspath_split(start)
path_is_unc, path_prefix, path_list = _abspath_split(path)
if path_is_unc ^ start_is_unc:
raise ValueError("Cannot mix UNC and non-UNC paths (%s and %s)"
% (path, start))
if path_prefix.lower() != start_prefix.lower():
if path_is_unc:
raise ValueError("path is on UNC root %s, start on UNC root %s"
% (path_prefix, start_prefix))
else:
raise ValueError("path is on drive %s, start on drive %s"
% (path_prefix, start_prefix))
# Work out how much of the filepath is shared by start and path.
i = 0
for e1, e2 in zip(start_list, path_list):
if e1.lower() != e2.lower():
break
i += 1
rel_list = [pardir] * (len(start_list)-i) + path_list[i:]
if not rel_list:
return curdir
return join(*rel_list)
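# Illustrative sketch (editorial addition, not part of the original module):
# how the normalization rules documented above behave. The expected values
# are derived from the comments and should be read as assumptions.
if __name__ == '__main__':
    assert normpath('A//B') == 'A\\B'
    assert normpath('A/./B') == 'A\\B'
    assert normpath('A/foo/../B') == 'A\\B'
    # Device and literal prefixes are returned unchanged:
    assert normpath('\\\\?\\C:/x/../y') == '\\\\?\\C:/x/../y'
    # No drive letter: leading backslashes are preserved, not collapsed.
    assert normpath('\\\\\\a\\b') == '\\\\\\a\\b'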
| gpl-3.0 |
cynapse/cynin | products/Plone4ArtistsCalendar/pythonlib/p4a/common/at.py | 4 | 2479 | from Acquisition import aq_inner, aq_base
from zope import schema
from zope.formlib import form
from zope.app.form.browser.textwidgets import TextAreaWidget
from Products.Five.formlib.formbase import PageDisplayForm, PageForm
from Products.Five.browser import BrowserView
from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
from Products.CMFCore import utils as cmfutils
from Products.Archetypes import Field, Widget
from Products.ATContentTypes.content import document
class RichTextEditWidget(BrowserView, TextAreaWidget):
"""A Zope 3 based formlib widget that exposes whatever rich text
editor is configured inside Plone.
"""
template = ViewPageTemplateFile('atwidget.pt')
def __init__(self, *args, **kwargs):
BrowserView.__init__(self, *args, **kwargs)
TextAreaWidget.__init__(self, *args, **kwargs)
def content_context(self):
current = aq_inner(self.context.context)
content_context = None
for x in range(100):
if hasattr(current, '__of__'):
content_context = current
break
if hasattr(current, 'context'):
current = current.context
else:
break
return content_context
def __call__(self):
self.context.REQUEST = self.request
if not 'body' in self.request.form:
self.request.form['body'] = self.context.get(self.context.context)
template = aq_base(self.template)
widget = aq_base(self)
content_context = self.content_context()
template = template.__of__(widget.__of__(content_context))
return template()
def hasInput(self):
return 'body' in self.request.form
def getInputValue(self):
return self.request.form.get('body', None)
form_fields = form.FormFields(
schema.Text(__name__='simpletext',
title=u'Simple Text',
required=False),
schema.Text(__name__='richtext',
title=u'Rich Text',
required=False),
)
class TestEditFieldsView(PageForm):
"""
"""
label = u'Test Fields'
form_fields = form_fields
form_fields['richtext'].custom_widget = RichTextEditWidget
@form.action('Save')
def handle_save_action(self, action, data):
pass
class TestDisplayFieldsView(PageDisplayForm):
"""
"""
label = u'Test Fields'
form_fields = form_fields
actions = ()
| gpl-3.0 |
bdastur/notes | python/asyncio/future_1.py | 1 | 2115 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
https://pymotw.com/3/asyncio/futures.html
'''
import asyncio
import time
import functools
#import gc
#gc.set_debug(gc.DEBUG_STATS)
def mark_done(future, result):
print("Setting future result to ", result)
future.set_result(result)
async def work_one(future):
while True:
print("Start One!")
time.sleep(1)
# A async sleep here passes the control back to the event loop.
# without that, this coroutine will continue holding the context and
# never let go.
if future.done():
print("We Have a future that is done")
else:
print("Future is not done yet")
await asyncio.sleep(2)
loop = asyncio.get_running_loop()
print("One Done! Time: ", loop.time())
loop.call_soon(mark_done, future, 'pass')
print("After calling mark done")
if future.done():
print("We reached our state")
def callback(future, n):
print("Future Done callback: ", future.result(), n)
def register_future_callback(future):
print("Registering callbacks")
future.add_done_callback(functools.partial(callback, n=44))
future.add_done_callback(functools.partial(callback, n=102))
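# Sketch (editorial addition): the run_until_complete variant that the
# commented-out lines in run_forever() below refer to. run_until_complete()
# returns once the future resolves, even though work_one itself never exits;
# treat this as an illustrative assumption, not original code.
def run_until_done():
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    try:
        all_done = asyncio.Future()
        register_future_callback(all_done)
        asyncio.ensure_future(work_one(all_done))
        result = loop.run_until_complete(all_done)
        print("Result: ", result)
    finally:
        loop.close()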
# Run the event loop forever; contrast with the run_until_complete sketch above.
def run_forever():
# Instead of get_event_loop, using
# new event loop to create new one and setting it as current
# event loop
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
print("Loop: ", loop)
try:
# this function returns the obj which could be a future, a task
# a task object wrapping obj, if the obj is a coroutine.
all_done = asyncio.Future()
register_future_callback(all_done)
obj1 = asyncio.ensure_future(work_one(all_done))
print("Time: ", loop.time())
loop.run_forever()
#result = loop.run_until_complete(all_done)
#print("Result here ", result)
except KeyboardInterrupt:
pass
finally:
print("Closing loop")
loop.close()
if __name__ == '__main__':
run_forever()
| apache-2.0 |
gurneyalex/OpenUpgrade | addons/account_anglo_saxon/purchase.py | 427 | 2043 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class purchase_order(osv.osv):
_name = "purchase.order"
_inherit = "purchase.order"
_description = "Purchase Order"
def _choose_account_from_po_line(self, cr, uid, order_line, context=None):
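        # Anglo-Saxon accounting: for stockable/consumable products, prefer
        # the product's stock input account, falling back to the product
        # category's account, then map it through the order's fiscal
        # position; services keep the account chosen by super().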
account_id = super(purchase_order, self)._choose_account_from_po_line(cr, uid, order_line, context=context)
if order_line.product_id and not order_line.product_id.type == 'service':
acc_id = order_line.product_id.property_stock_account_input and order_line.product_id.property_stock_account_input.id
if not acc_id:
acc_id = order_line.product_id.categ_id.property_stock_account_input_categ and order_line.product_id.categ_id.property_stock_account_input_categ.id
if acc_id:
fpos = order_line.order_id.fiscal_position or False
account_id = self.pool.get('account.fiscal.position').map_account(cr, uid, fpos, acc_id)
return account_id
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
poffuomo/spark | examples/src/main/python/ml/vector_indexer_example.py | 123 | 1685 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
# $example on$
from pyspark.ml.feature import VectorIndexer
# $example off$
from pyspark.sql import SparkSession
if __name__ == "__main__":
spark = SparkSession\
.builder\
.appName("VectorIndexerExample")\
.getOrCreate()
# $example on$
data = spark.read.format("libsvm").load("data/mllib/sample_libsvm_data.txt")
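    # maxCategories=10: features with <= 10 distinct values are treated as
    # categorical and re-indexed; the rest are left as continuous.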
indexer = VectorIndexer(inputCol="features", outputCol="indexed", maxCategories=10)
indexerModel = indexer.fit(data)
categoricalFeatures = indexerModel.categoryMaps
print("Chose %d categorical features: %s" %
(len(categoricalFeatures), ", ".join(str(k) for k in categoricalFeatures.keys())))
# Create new column "indexed" with categorical values transformed to indices
indexedData = indexerModel.transform(data)
indexedData.show()
# $example off$
spark.stop()
| apache-2.0 |
jumpstarter-io/horizon | openstack_dashboard/test/api_tests/cinder_tests.py | 1 | 7570 | # Copyright 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.test.utils import override_settings
import cinderclient as cinder_client
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
class CinderApiTests(test.APITestCase):
def test_volume_list(self):
search_opts = {'all_tenants': 1}
volumes = self.cinder_volumes.list()
cinderclient = self.stub_cinderclient()
cinderclient.volumes = self.mox.CreateMockAnything()
cinderclient.volumes.list(search_opts=search_opts,).AndReturn(volumes)
self.mox.ReplayAll()
# No assertions are necessary. Verification is handled by mox.
api.cinder.volume_list(self.request, search_opts=search_opts)
def test_volume_snapshot_list(self):
search_opts = {'all_tenants': 1}
volume_snapshots = self.cinder_volume_snapshots.list()
cinderclient = self.stub_cinderclient()
cinderclient.volume_snapshots = self.mox.CreateMockAnything()
cinderclient.volume_snapshots.list(search_opts=search_opts).\
AndReturn(volume_snapshots)
self.mox.ReplayAll()
api.cinder.volume_snapshot_list(self.request, search_opts=search_opts)
def test_volume_snapshot_list_no_volume_configured(self):
# remove volume from service catalog
catalog = self.service_catalog
for service in catalog:
if service["type"] == "volume":
self.service_catalog.remove(service)
search_opts = {'all_tenants': 1}
volume_snapshots = self.cinder_volume_snapshots.list()
cinderclient = self.stub_cinderclient()
cinderclient.volume_snapshots = self.mox.CreateMockAnything()
cinderclient.volume_snapshots.list(search_opts=search_opts).\
AndReturn(volume_snapshots)
self.mox.ReplayAll()
api.cinder.volume_snapshot_list(self.request, search_opts=search_opts)
class CinderApiVersionTests(test.TestCase):
def setUp(self):
super(CinderApiVersionTests, self).setUp()
# The version is set when the module is loaded. Reset the
# active version each time so that we can test with different
# versions.
api.cinder.VERSIONS._active = None
def test_default_client_is_v1(self):
client = api.cinder.cinderclient(self.request)
self.assertIsInstance(client, cinder_client.v1.client.Client)
@override_settings(OPENSTACK_API_VERSIONS={'volume': 1})
def test_v1_setting_returns_v1_client(self):
client = api.cinder.cinderclient(self.request)
self.assertIsInstance(client, cinder_client.v1.client.Client)
@override_settings(OPENSTACK_API_VERSIONS={'volume': 2})
def test_v2_setting_returns_v2_client(self):
client = api.cinder.cinderclient(self.request)
self.assertIsInstance(client, cinder_client.v2.client.Client)
def test_get_v1_volume_attributes(self):
# Get a v1 volume
volume = self.cinder_volumes.first()
self.assertTrue(hasattr(volume._apiresource, 'display_name'))
self.assertFalse(hasattr(volume._apiresource, 'name'))
name = "A test volume name"
description = "A volume description"
setattr(volume._apiresource, 'display_name', name)
setattr(volume._apiresource, 'display_description', description)
self.assertEqual(name, volume.name)
self.assertEqual(description, volume.description)
def test_get_v2_volume_attributes(self):
# Get a v2 volume
volume = self.cinder_volumes.get(name="v2_volume")
self.assertTrue(hasattr(volume._apiresource, 'name'))
self.assertFalse(hasattr(volume._apiresource, 'display_name'))
name = "A v2 test volume name"
description = "A v2 volume description"
setattr(volume._apiresource, 'name', name)
setattr(volume._apiresource, 'description', description)
self.assertEqual(name, volume.name)
self.assertEqual(description, volume.description)
def test_get_v1_snapshot_attributes(self):
# Get a v1 snapshot
snapshot = self.cinder_volume_snapshots.first()
self.assertFalse(hasattr(snapshot._apiresource, 'name'))
name = "A test snapshot name"
description = "A snapshot description"
setattr(snapshot._apiresource, 'display_name', name)
setattr(snapshot._apiresource, 'display_description', description)
self.assertEqual(name, snapshot.name)
self.assertEqual(description, snapshot.description)
def test_get_v2_snapshot_attributes(self):
# Get a v2 snapshot
snapshot = self.cinder_volume_snapshots.get(
description="v2 volume snapshot description")
self.assertFalse(hasattr(snapshot._apiresource, 'display_name'))
name = "A v2 test snapshot name"
description = "A v2 snapshot description"
setattr(snapshot._apiresource, 'name', name)
setattr(snapshot._apiresource, 'description', description)
self.assertEqual(name, snapshot.name)
self.assertEqual(description, snapshot.description)
def test_get_id_for_nameless_volume(self):
volume = self.cinder_volumes.first()
setattr(volume._apiresource, 'display_name', "")
self.assertEqual(volume.id, volume.name)
@override_settings(OPENSTACK_API_VERSIONS={'volume': 1})
def test_adapt_dictionary_to_v1(self):
volume = self.cinder_volumes.first()
data = {'name': volume.name,
'description': volume.description,
'size': volume.size}
ret_data = api.cinder._replace_v2_parameters(data)
self.assertIn('display_name', ret_data.keys())
self.assertIn('display_description', ret_data.keys())
self.assertNotIn('name', ret_data.keys())
self.assertNotIn('description', ret_data.keys())
@override_settings(OPENSTACK_API_VERSIONS={'volume': 2})
def test_adapt_dictionary_to_v2(self):
volume = self.cinder_volumes.first()
data = {'name': volume.name,
'description': volume.description,
'size': volume.size}
ret_data = api.cinder._replace_v2_parameters(data)
self.assertIn('name', ret_data.keys())
self.assertIn('description', ret_data.keys())
self.assertNotIn('display_name', ret_data.keys())
self.assertNotIn('display_description', ret_data.keys())
@override_settings(OPENSTACK_API_VERSIONS={'volume': 1})
def test_version_get_1(self):
version = api.cinder.version_get()
self.assertEqual(version, 1)
@override_settings(OPENSTACK_API_VERSIONS={'volume': 2})
def test_version_get_2(self):
version = api.cinder.version_get()
self.assertEqual(version, 2)
@override_settings(OPENSTACK_API_VERSIONS={'volume': 1})
def test_retype_not_supported(self):
retype_supported = api.cinder.retype_supported()
self.assertFalse(retype_supported)
| apache-2.0 |
abzaloid/maps | django-project/lib/python2.7/site-packages/django/db/backends/base/base.py | 103 | 17962 | import time
import warnings
from collections import deque
from contextlib import contextmanager
from django.conf import settings
from django.db import DEFAULT_DB_ALIAS
from django.db.backends import utils
from django.db.backends.signals import connection_created
from django.db.transaction import TransactionManagementError
from django.db.utils import DatabaseError, DatabaseErrorWrapper
from django.utils.functional import cached_property
from django.utils.six.moves import _thread as thread
NO_DB_ALIAS = '__no_db__'
class BaseDatabaseWrapper(object):
"""
Represents a database connection.
"""
# Mapping of Field objects to their column types.
data_types = {}
# Mapping of Field objects to their SQL suffix such as AUTOINCREMENT.
data_types_suffix = {}
# Mapping of Field objects to their SQL for CHECK constraints.
data_type_check_constraints = {}
ops = None
vendor = 'unknown'
SchemaEditorClass = None
queries_limit = 9000
def __init__(self, settings_dict, alias=DEFAULT_DB_ALIAS,
allow_thread_sharing=False):
# Connection related attributes.
# The underlying database connection.
self.connection = None
# `settings_dict` should be a dictionary containing keys such as
# NAME, USER, etc. It's called `settings_dict` instead of `settings`
# to disambiguate it from Django settings modules.
self.settings_dict = settings_dict
self.alias = alias
# Query logging in debug mode or when explicitly enabled.
self.queries_log = deque(maxlen=self.queries_limit)
self.force_debug_cursor = False
# Transaction related attributes.
# Tracks if the connection is in autocommit mode. Per PEP 249, by
# default, it isn't.
self.autocommit = False
# Tracks if the connection is in a transaction managed by 'atomic'.
self.in_atomic_block = False
# Increment to generate unique savepoint ids.
self.savepoint_state = 0
# List of savepoints created by 'atomic'.
self.savepoint_ids = []
# Tracks if the outermost 'atomic' block should commit on exit,
        # i.e. if autocommit was active on entry.
self.commit_on_exit = True
# Tracks if the transaction should be rolled back to the next
# available savepoint because of an exception in an inner block.
self.needs_rollback = False
# Connection termination related attributes.
self.close_at = None
self.closed_in_transaction = False
self.errors_occurred = False
# Thread-safety related attributes.
self.allow_thread_sharing = allow_thread_sharing
self._thread_ident = thread.get_ident()
@property
def queries_logged(self):
return self.force_debug_cursor or settings.DEBUG
@property
def queries(self):
if len(self.queries_log) == self.queries_log.maxlen:
warnings.warn(
"Limit for query logging exceeded, only the last {} queries "
"will be returned.".format(self.queries_log.maxlen))
return list(self.queries_log)
# ##### Backend-specific methods for creating connections and cursors #####
def get_connection_params(self):
"""Returns a dict of parameters suitable for get_new_connection."""
raise NotImplementedError('subclasses of BaseDatabaseWrapper may require a get_connection_params() method')
def get_new_connection(self, conn_params):
"""Opens a connection to the database."""
raise NotImplementedError('subclasses of BaseDatabaseWrapper may require a get_new_connection() method')
def init_connection_state(self):
"""Initializes the database connection settings."""
raise NotImplementedError('subclasses of BaseDatabaseWrapper may require an init_connection_state() method')
def create_cursor(self):
"""Creates a cursor. Assumes that a connection is established."""
raise NotImplementedError('subclasses of BaseDatabaseWrapper may require a create_cursor() method')
# ##### Backend-specific methods for creating connections #####
def connect(self):
"""Connects to the database. Assumes that the connection is closed."""
# In case the previous connection was closed while in an atomic block
self.in_atomic_block = False
self.savepoint_ids = []
self.needs_rollback = False
# Reset parameters defining when to close the connection
max_age = self.settings_dict['CONN_MAX_AGE']
self.close_at = None if max_age is None else time.time() + max_age
self.closed_in_transaction = False
self.errors_occurred = False
# Establish the connection
conn_params = self.get_connection_params()
self.connection = self.get_new_connection(conn_params)
self.set_autocommit(self.settings_dict['AUTOCOMMIT'])
self.init_connection_state()
connection_created.send(sender=self.__class__, connection=self)
def ensure_connection(self):
"""
Guarantees that a connection to the database is established.
"""
if self.connection is None:
with self.wrap_database_errors:
self.connect()
# ##### Backend-specific wrappers for PEP-249 connection methods #####
def _cursor(self):
self.ensure_connection()
with self.wrap_database_errors:
return self.create_cursor()
def _commit(self):
if self.connection is not None:
with self.wrap_database_errors:
return self.connection.commit()
def _rollback(self):
if self.connection is not None:
with self.wrap_database_errors:
return self.connection.rollback()
def _close(self):
if self.connection is not None:
with self.wrap_database_errors:
return self.connection.close()
# ##### Generic wrappers for PEP-249 connection methods #####
def cursor(self):
"""
Creates a cursor, opening a connection if necessary.
"""
self.validate_thread_sharing()
if self.queries_logged:
cursor = self.make_debug_cursor(self._cursor())
else:
cursor = self.make_cursor(self._cursor())
return cursor
def commit(self):
"""
Commits a transaction and resets the dirty flag.
"""
self.validate_thread_sharing()
self.validate_no_atomic_block()
self._commit()
# A successful commit means that the database connection works.
self.errors_occurred = False
def rollback(self):
"""
Rolls back a transaction and resets the dirty flag.
"""
self.validate_thread_sharing()
self.validate_no_atomic_block()
self._rollback()
# A successful rollback means that the database connection works.
self.errors_occurred = False
def close(self):
"""
Closes the connection to the database.
"""
self.validate_thread_sharing()
# Don't call validate_no_atomic_block() to avoid making it difficult
# to get rid of a connection in an invalid state. The next connect()
# will reset the transaction state anyway.
if self.closed_in_transaction or self.connection is None:
return
try:
self._close()
finally:
if self.in_atomic_block:
self.closed_in_transaction = True
self.needs_rollback = True
else:
self.connection = None
# ##### Backend-specific savepoint management methods #####
def _savepoint(self, sid):
with self.cursor() as cursor:
cursor.execute(self.ops.savepoint_create_sql(sid))
def _savepoint_rollback(self, sid):
with self.cursor() as cursor:
cursor.execute(self.ops.savepoint_rollback_sql(sid))
def _savepoint_commit(self, sid):
with self.cursor() as cursor:
cursor.execute(self.ops.savepoint_commit_sql(sid))
def _savepoint_allowed(self):
# Savepoints cannot be created outside a transaction
return self.features.uses_savepoints and not self.get_autocommit()
# ##### Generic savepoint management methods #####
def savepoint(self):
"""
Creates a savepoint inside the current transaction. Returns an
identifier for the savepoint that will be used for the subsequent
rollback or commit. Does nothing if savepoints are not supported.
"""
if not self._savepoint_allowed():
return
thread_ident = thread.get_ident()
tid = str(thread_ident).replace('-', '')
self.savepoint_state += 1
sid = "s%s_x%d" % (tid, self.savepoint_state)
self.validate_thread_sharing()
self._savepoint(sid)
return sid
def savepoint_rollback(self, sid):
"""
Rolls back to a savepoint. Does nothing if savepoints are not supported.
"""
if not self._savepoint_allowed():
return
self.validate_thread_sharing()
self._savepoint_rollback(sid)
def savepoint_commit(self, sid):
"""
Releases a savepoint. Does nothing if savepoints are not supported.
"""
if not self._savepoint_allowed():
return
self.validate_thread_sharing()
self._savepoint_commit(sid)
def clean_savepoints(self):
"""
Resets the counter used to generate unique savepoint ids in this thread.
"""
self.savepoint_state = 0
# ##### Backend-specific transaction management methods #####
def _set_autocommit(self, autocommit):
"""
Backend-specific implementation to enable or disable autocommit.
"""
raise NotImplementedError('subclasses of BaseDatabaseWrapper may require a _set_autocommit() method')
# ##### Generic transaction management methods #####
def get_autocommit(self):
"""
Check the autocommit state.
"""
self.ensure_connection()
return self.autocommit
def set_autocommit(self, autocommit):
"""
Enable or disable autocommit.
"""
self.validate_no_atomic_block()
self.ensure_connection()
self._set_autocommit(autocommit)
self.autocommit = autocommit
def get_rollback(self):
"""
Get the "needs rollback" flag -- for *advanced use* only.
"""
if not self.in_atomic_block:
raise TransactionManagementError(
"The rollback flag doesn't work outside of an 'atomic' block.")
return self.needs_rollback
def set_rollback(self, rollback):
"""
Set or unset the "needs rollback" flag -- for *advanced use* only.
"""
if not self.in_atomic_block:
raise TransactionManagementError(
"The rollback flag doesn't work outside of an 'atomic' block.")
self.needs_rollback = rollback
def validate_no_atomic_block(self):
"""
Raise an error if an atomic block is active.
"""
if self.in_atomic_block:
raise TransactionManagementError(
"This is forbidden when an 'atomic' block is active.")
def validate_no_broken_transaction(self):
if self.needs_rollback:
raise TransactionManagementError(
"An error occurred in the current transaction. You can't "
"execute queries until the end of the 'atomic' block.")
# ##### Foreign key constraints checks handling #####
@contextmanager
def constraint_checks_disabled(self):
"""
Context manager that disables foreign key constraint checking.
"""
disabled = self.disable_constraint_checking()
try:
yield
finally:
if disabled:
self.enable_constraint_checking()
def disable_constraint_checking(self):
"""
Backends can implement as needed to temporarily disable foreign key
constraint checking. Should return True if the constraints were
disabled and will need to be reenabled.
"""
return False
def enable_constraint_checking(self):
"""
Backends can implement as needed to re-enable foreign key constraint
checking.
"""
pass
def check_constraints(self, table_names=None):
"""
Backends can override this method if they can apply constraint
checking (e.g. via "SET CONSTRAINTS ALL IMMEDIATE"). Should raise an
IntegrityError if any invalid foreign key references are encountered.
"""
pass
# ##### Connection termination handling #####
def is_usable(self):
"""
Tests if the database connection is usable.
This function may assume that self.connection is not None.
Actual implementations should take care not to raise exceptions
as that may prevent Django from recycling unusable connections.
"""
raise NotImplementedError(
"subclasses of BaseDatabaseWrapper may require an is_usable() method")
def close_if_unusable_or_obsolete(self):
"""
Closes the current connection if unrecoverable errors have occurred,
or if it outlived its maximum age.
"""
if self.connection is not None:
# If the application didn't restore the original autocommit setting,
# don't take chances, drop the connection.
if self.get_autocommit() != self.settings_dict['AUTOCOMMIT']:
self.close()
return
# If an exception other than DataError or IntegrityError occurred
# since the last commit / rollback, check if the connection works.
if self.errors_occurred:
if self.is_usable():
self.errors_occurred = False
else:
self.close()
return
if self.close_at is not None and time.time() >= self.close_at:
self.close()
return
# ##### Thread safety handling #####
def validate_thread_sharing(self):
"""
Validates that the connection isn't accessed by another thread than the
one which originally created it, unless the connection was explicitly
authorized to be shared between threads (via the `allow_thread_sharing`
property). Raises an exception if the validation fails.
"""
if not (self.allow_thread_sharing
or self._thread_ident == thread.get_ident()):
raise DatabaseError("DatabaseWrapper objects created in a "
"thread can only be used in that same thread. The object "
"with alias '%s' was created in thread id %s and this is "
"thread id %s."
% (self.alias, self._thread_ident, thread.get_ident()))
# ##### Miscellaneous #####
def prepare_database(self):
"""
Hook to do any database check or preparation, generally called before
migrating a project or an app.
"""
pass
@cached_property
def wrap_database_errors(self):
"""
Context manager and decorator that re-throws backend-specific database
exceptions using Django's common wrappers.
"""
return DatabaseErrorWrapper(self)
def make_debug_cursor(self, cursor):
"""
Creates a cursor that logs all queries in self.queries_log.
"""
return utils.CursorDebugWrapper(cursor, self)
def make_cursor(self, cursor):
"""
Creates a cursor without debug logging.
"""
return utils.CursorWrapper(cursor, self)
@contextmanager
def temporary_connection(self):
"""
Context manager that ensures that a connection is established, and
if it opened one, closes it to avoid leaving a dangling connection.
This is useful for operations outside of the request-response cycle.
Provides a cursor: with self.temporary_connection() as cursor: ...
"""
must_close = self.connection is None
cursor = self.cursor()
try:
yield cursor
finally:
cursor.close()
if must_close:
self.close()
@cached_property
def _nodb_connection(self):
"""
Alternative connection to be used when there is no need to access
the main database, specifically for test db creation/deletion.
This also prevents the production database from being exposed to
potential child threads while (or after) the test database is destroyed.
Refs #10868, #17786, #16969.
"""
settings_dict = self.settings_dict.copy()
settings_dict['NAME'] = None
nodb_connection = self.__class__(
settings_dict,
alias=NO_DB_ALIAS,
allow_thread_sharing=False)
return nodb_connection
def _start_transaction_under_autocommit(self):
"""
Only required when autocommits_when_autocommit_is_off = True.
"""
raise NotImplementedError(
'subclasses of BaseDatabaseWrapper may require a '
'_start_transaction_under_autocommit() method'
)
def schema_editor(self, *args, **kwargs):
"""
Returns a new instance of this backend's SchemaEditor.
"""
if self.SchemaEditorClass is None:
raise NotImplementedError(
'The SchemaEditorClass attribute of this database wrapper is still None')
return self.SchemaEditorClass(self, *args, **kwargs)
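# Illustrative sketch (editorial addition): the minimal surface a concrete
# backend is expected to fill in. The sqlite3 driver and the isolation_level
# trick are illustrative assumptions; real backends (e.g.
# django.db.backends.sqlite3.base) also provide ops, features, client,
# creation and introspection objects.
class ExampleDatabaseWrapper(BaseDatabaseWrapper):
    vendor = 'example'
    def get_connection_params(self):
        return {'database': self.settings_dict['NAME']}
    def get_new_connection(self, conn_params):
        import sqlite3
        return sqlite3.connect(**conn_params)
    def init_connection_state(self):
        pass
    def create_cursor(self):
        return self.connection.cursor()
    def _set_autocommit(self, autocommit):
        # sqlite3 autocommits when isolation_level is None.
        self.connection.isolation_level = None if autocommit else ''
    def is_usable(self):
        try:
            self.connection.execute('SELECT 1')
        except Exception:
            return False
        return True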
| mit |
75651/kbengine_cloud | kbe/src/lib/python/Lib/encodings/mbcs.py | 860 | 1211 | """ Python 'mbcs' Codec for Windows
Cloned by Mark Hammond ([email protected]) from ascii.py,
which was written by Marc-Andre Lemburg ([email protected]).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""
# Import them explicitly to cause an ImportError
# on non-Windows systems
from codecs import mbcs_encode, mbcs_decode
# for IncrementalDecoder, IncrementalEncoder, ...
import codecs
### Codec APIs
encode = mbcs_encode
def decode(input, errors='strict'):
return mbcs_decode(input, errors, True)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return mbcs_encode(input, self.errors)[0]
class IncrementalDecoder(codecs.BufferedIncrementalDecoder):
_buffer_decode = mbcs_decode
class StreamWriter(codecs.StreamWriter):
encode = mbcs_encode
class StreamReader(codecs.StreamReader):
decode = mbcs_decode
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='mbcs',
encode=encode,
decode=decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
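# Round-trip sketch (editorial addition): only meaningful on Windows, where
# the mbcs codec maps to the active ANSI code page; elsewhere the import at
# the top of this module already fails.
if __name__ == '__main__':
    data, _ = encode(u'example')
    text, _ = decode(data)
    assert text == u'example'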
| lgpl-3.0 |
Dahlgren/HTPC-Manager | libs/requests/adapters.py | 205 | 16799 | # -*- coding: utf-8 -*-
"""
requests.adapters
~~~~~~~~~~~~~~~~~
This module contains the transport adapters that Requests uses to define
and maintain connections.
"""
import socket
from .models import Response
from .packages.urllib3 import Retry
from .packages.urllib3.poolmanager import PoolManager, proxy_from_url
from .packages.urllib3.response import HTTPResponse
from .packages.urllib3.util import Timeout as TimeoutSauce
from .compat import urlparse, basestring
from .utils import (DEFAULT_CA_BUNDLE_PATH, get_encoding_from_headers,
prepend_scheme_if_needed, get_auth_from_url, urldefragauth)
from .structures import CaseInsensitiveDict
from .packages.urllib3.exceptions import ConnectTimeoutError
from .packages.urllib3.exceptions import HTTPError as _HTTPError
from .packages.urllib3.exceptions import MaxRetryError
from .packages.urllib3.exceptions import ProxyError as _ProxyError
from .packages.urllib3.exceptions import ProtocolError
from .packages.urllib3.exceptions import ReadTimeoutError
from .packages.urllib3.exceptions import SSLError as _SSLError
from .packages.urllib3.exceptions import ResponseError
from .cookies import extract_cookies_to_jar
from .exceptions import (ConnectionError, ConnectTimeout, ReadTimeout, SSLError,
ProxyError, RetryError)
from .auth import _basic_auth_str
DEFAULT_POOLBLOCK = False
DEFAULT_POOLSIZE = 10
DEFAULT_RETRIES = 0
class BaseAdapter(object):
"""The Base Transport Adapter"""
def __init__(self):
super(BaseAdapter, self).__init__()
def send(self):
raise NotImplementedError
def close(self):
raise NotImplementedError
class HTTPAdapter(BaseAdapter):
"""The built-in HTTP Adapter for urllib3.
Provides a general-case interface for Requests sessions to contact HTTP and
HTTPS urls by implementing the Transport Adapter interface. This class will
usually be created by the :class:`Session <Session>` class under the
covers.
:param pool_connections: The number of urllib3 connection pools to cache.
:param pool_maxsize: The maximum number of connections to save in the pool.
:param int max_retries: The maximum number of retries each connection
should attempt. Note, this applies only to failed DNS lookups, socket
connections and connection timeouts, never to requests where data has
made it to the server. By default, Requests does not retry failed
connections. If you need granular control over the conditions under
which we retry a request, import urllib3's ``Retry`` class and pass
that instead.
:param pool_block: Whether the connection pool should block for connections.
Usage::
>>> import requests
>>> s = requests.Session()
>>> a = requests.adapters.HTTPAdapter(max_retries=3)
>>> s.mount('http://', a)
"""
__attrs__ = ['max_retries', 'config', '_pool_connections', '_pool_maxsize',
'_pool_block']
def __init__(self, pool_connections=DEFAULT_POOLSIZE,
pool_maxsize=DEFAULT_POOLSIZE, max_retries=DEFAULT_RETRIES,
pool_block=DEFAULT_POOLBLOCK):
if max_retries == DEFAULT_RETRIES:
self.max_retries = Retry(0, read=False)
else:
self.max_retries = Retry.from_int(max_retries)
self.config = {}
self.proxy_manager = {}
super(HTTPAdapter, self).__init__()
self._pool_connections = pool_connections
self._pool_maxsize = pool_maxsize
self._pool_block = pool_block
self.init_poolmanager(pool_connections, pool_maxsize, block=pool_block)
def __getstate__(self):
return dict((attr, getattr(self, attr, None)) for attr in
self.__attrs__)
def __setstate__(self, state):
        # Can't handle by adding 'proxy_manager' to self.__attrs__ because
        # self.poolmanager uses a lambda function, which isn't pickleable.
self.proxy_manager = {}
self.config = {}
for attr, value in state.items():
setattr(self, attr, value)
self.init_poolmanager(self._pool_connections, self._pool_maxsize,
block=self._pool_block)
def init_poolmanager(self, connections, maxsize, block=DEFAULT_POOLBLOCK, **pool_kwargs):
"""Initializes a urllib3 PoolManager.
This method should not be called from user code, and is only
exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param connections: The number of urllib3 connection pools to cache.
:param maxsize: The maximum number of connections to save in the pool.
:param block: Block when no free connections are available.
:param pool_kwargs: Extra keyword arguments used to initialize the Pool Manager.
"""
# save these values for pickling
self._pool_connections = connections
self._pool_maxsize = maxsize
self._pool_block = block
self.poolmanager = PoolManager(num_pools=connections, maxsize=maxsize,
block=block, strict=True, **pool_kwargs)
def proxy_manager_for(self, proxy, **proxy_kwargs):
"""Return urllib3 ProxyManager for the given proxy.
This method should not be called from user code, and is only
exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param proxy: The proxy to return a urllib3 ProxyManager for.
:param proxy_kwargs: Extra keyword arguments used to configure the Proxy Manager.
:returns: ProxyManager
"""
        if proxy not in self.proxy_manager:
proxy_headers = self.proxy_headers(proxy)
self.proxy_manager[proxy] = proxy_from_url(
proxy,
proxy_headers=proxy_headers,
num_pools=self._pool_connections,
maxsize=self._pool_maxsize,
block=self._pool_block,
**proxy_kwargs)
return self.proxy_manager[proxy]
def cert_verify(self, conn, url, verify, cert):
"""Verify a SSL certificate. This method should not be called from user
code, and is only exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param conn: The urllib3 connection object associated with the cert.
:param url: The requested URL.
:param verify: Whether we should actually verify the certificate.
:param cert: The SSL certificate to verify.
"""
if url.lower().startswith('https') and verify:
cert_loc = None
# Allow self-specified cert location.
if verify is not True:
cert_loc = verify
if not cert_loc:
cert_loc = DEFAULT_CA_BUNDLE_PATH
if not cert_loc:
raise Exception("Could not find a suitable SSL CA certificate bundle.")
conn.cert_reqs = 'CERT_REQUIRED'
conn.ca_certs = cert_loc
else:
conn.cert_reqs = 'CERT_NONE'
conn.ca_certs = None
if cert:
if not isinstance(cert, basestring):
conn.cert_file = cert[0]
conn.key_file = cert[1]
else:
conn.cert_file = cert
def build_response(self, req, resp):
"""Builds a :class:`Response <requests.Response>` object from a urllib3
response. This should not be called from user code, and is only exposed
for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`
:param req: The :class:`PreparedRequest <PreparedRequest>` used to generate the response.
:param resp: The urllib3 response object.
"""
response = Response()
# Fallback to None if there's no status_code, for whatever reason.
response.status_code = getattr(resp, 'status', None)
# Make headers case-insensitive.
response.headers = CaseInsensitiveDict(getattr(resp, 'headers', {}))
# Set encoding.
response.encoding = get_encoding_from_headers(response.headers)
response.raw = resp
response.reason = response.raw.reason
if isinstance(req.url, bytes):
response.url = req.url.decode('utf-8')
else:
response.url = req.url
# Add new cookies from the server.
extract_cookies_to_jar(response.cookies, req, resp)
# Give the Response some context.
response.request = req
response.connection = self
return response
def get_connection(self, url, proxies=None):
"""Returns a urllib3 connection for the given URL. This should not be
called from user code, and is only exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param url: The URL to connect to.
:param proxies: (optional) A Requests-style dictionary of proxies used on this request.
"""
proxies = proxies or {}
proxy = proxies.get(urlparse(url.lower()).scheme)
if proxy:
proxy = prepend_scheme_if_needed(proxy, 'http')
proxy_manager = self.proxy_manager_for(proxy)
conn = proxy_manager.connection_from_url(url)
else:
# Only scheme should be lower case
parsed = urlparse(url)
url = parsed.geturl()
conn = self.poolmanager.connection_from_url(url)
return conn
def close(self):
"""Disposes of any internal state.
Currently, this just closes the PoolManager, which closes pooled
connections.
"""
self.poolmanager.clear()
def request_url(self, request, proxies):
"""Obtain the url to use when making the final request.
        If the message is being sent through an HTTP proxy, the full URL has to
be used. Otherwise, we should only use the path portion of the URL.
This should not be called from user code, and is only exposed for use
when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
:param proxies: A dictionary of schemes to proxy URLs.
"""
proxies = proxies or {}
scheme = urlparse(request.url).scheme
proxy = proxies.get(scheme)
if proxy and scheme != 'https':
url = urldefragauth(request.url)
else:
url = request.path_url
return url
def add_headers(self, request, **kwargs):
"""Add any headers needed by the connection. As of v2.0 this does
nothing by default, but is left for overriding by users that subclass
the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
This should not be called from user code, and is only exposed for use
when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param request: The :class:`PreparedRequest <PreparedRequest>` to add headers to.
:param kwargs: The keyword arguments from the call to send().
"""
pass
def proxy_headers(self, proxy):
"""Returns a dictionary of the headers to add to any request sent
through a proxy. This works with urllib3 magic to ensure that they are
correctly sent to the proxy, rather than in a tunnelled request if
CONNECT is being used.
This should not be called from user code, and is only exposed for use
when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
        :param proxy: The url of the proxy being used for this request.
"""
headers = {}
username, password = get_auth_from_url(proxy)
if username and password:
headers['Proxy-Authorization'] = _basic_auth_str(username,
password)
return headers
def send(self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None):
"""Sends PreparedRequest object. Returns Response object.
:param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
:param stream: (optional) Whether to stream the request content.
:param timeout: (optional) How long to wait for the server to send
data before giving up, as a float, or a (`connect timeout, read
timeout <user/advanced.html#timeouts>`_) tuple.
:type timeout: float or tuple
:param verify: (optional) Whether to verify SSL certificates.
:param cert: (optional) Any user-provided SSL certificate to be trusted.
:param proxies: (optional) The proxies dictionary to apply to the request.
"""
conn = self.get_connection(request.url, proxies)
self.cert_verify(conn, request.url, verify, cert)
url = self.request_url(request, proxies)
self.add_headers(request)
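        # A body without a Content-Length header is streamed with chunked
        # transfer-encoding via the hand-rolled branch further below.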
chunked = not (request.body is None or 'Content-Length' in request.headers)
if isinstance(timeout, tuple):
try:
connect, read = timeout
timeout = TimeoutSauce(connect=connect, read=read)
except ValueError as e:
# this may raise a string formatting error.
err = ("Invalid timeout {0}. Pass a (connect, read) "
"timeout tuple, or a single float to set "
"both timeouts to the same value".format(timeout))
raise ValueError(err)
else:
timeout = TimeoutSauce(connect=timeout, read=timeout)
try:
if not chunked:
resp = conn.urlopen(
method=request.method,
url=url,
body=request.body,
headers=request.headers,
redirect=False,
assert_same_host=False,
preload_content=False,
decode_content=False,
retries=self.max_retries,
timeout=timeout
)
# Send the request.
else:
if hasattr(conn, 'proxy_pool'):
conn = conn.proxy_pool
low_conn = conn._get_conn(timeout=timeout)
try:
low_conn.putrequest(request.method,
url,
skip_accept_encoding=True)
for header, value in request.headers.items():
low_conn.putheader(header, value)
low_conn.endheaders()
for i in request.body:
low_conn.send(hex(len(i))[2:].encode('utf-8'))
low_conn.send(b'\r\n')
low_conn.send(i)
low_conn.send(b'\r\n')
low_conn.send(b'0\r\n\r\n')
r = low_conn.getresponse()
resp = HTTPResponse.from_httplib(
r,
pool=conn,
connection=low_conn,
preload_content=False,
decode_content=False
)
except:
# If we hit any problems here, clean up the connection.
# Then, reraise so that we can handle the actual exception.
low_conn.close()
raise
else:
# All is well, return the connection to the pool.
conn._put_conn(low_conn)
except (ProtocolError, socket.error) as err:
raise ConnectionError(err, request=request)
except MaxRetryError as e:
if isinstance(e.reason, ConnectTimeoutError):
raise ConnectTimeout(e, request=request)
if isinstance(e.reason, ResponseError):
raise RetryError(e, request=request)
raise ConnectionError(e, request=request)
except _ProxyError as e:
raise ProxyError(e)
except (_SSLError, _HTTPError) as e:
if isinstance(e, _SSLError):
raise SSLError(e, request=request)
elif isinstance(e, ReadTimeoutError):
raise ReadTimeout(e, request=request)
else:
raise
return self.build_response(request, resp)
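# Usage sketch (editorial addition), extending the docstring example above:
# wiring an adapter with a granular urllib3 Retry policy. The Retry values
# are illustrative assumptions.
if __name__ == '__main__':
    retries = Retry(total=5, connect=3, read=2, backoff_factor=0.2)
    adapter = HTTPAdapter(max_retries=retries, pool_connections=20,
                          pool_maxsize=50)
    print(adapter.max_retries)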
| mit |
MarkusH/django-osm-field | example/example/views.py | 1 | 1290 | from django.urls import reverse
from django.views.generic import (
CreateView,
DeleteView,
DetailView,
ListView,
UpdateView,
)
from .models import ExampleModel
class ExampleCreateView(CreateView):
fields = [
"location",
"location_lat",
"location_lon",
"another",
"some_lat_field",
"other_lon_field",
]
model = ExampleModel
def get_success_url(self):
return reverse("detail", kwargs={"pk": self.object.pk})
create_view = ExampleCreateView.as_view()
class ExampleDeleteView(DeleteView):
model = ExampleModel
def get_success_url(self):
return reverse("list")
delete_view = ExampleDeleteView.as_view()
class ExampleDetailView(DetailView):
model = ExampleModel
detail_view = ExampleDetailView.as_view()
class ExampleListView(ListView):
model = ExampleModel
list_view = ExampleListView.as_view()
class ExampleUpdateView(UpdateView):
fields = [
"location",
"location_lat",
"location_lon",
"another",
"some_lat_field",
"other_lon_field",
]
model = ExampleModel
def get_success_url(self):
return reverse("detail", kwargs={"pk": self.object.pk})
update_view = ExampleUpdateView.as_view()
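# Sketch (editorial addition): the URL wiring these views assume, inferred
# from the reverse() names used above ('list', 'detail'). A hypothetical
# urls.py:
#
# from django.urls import path
# from . import views
#
# urlpatterns = [
#     path('', views.list_view, name='list'),
#     path('new/', views.create_view, name='create'),
#     path('<int:pk>/', views.detail_view, name='detail'),
#     path('<int:pk>/edit/', views.update_view, name='update'),
#     path('<int:pk>/delete/', views.delete_view, name='delete'),
# ]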
| mit |
QuintonJason/qvids | build/x264/tools/digress/cli.py | 145 | 4413 | """
Digress's CLI interface.
"""
import inspect
import sys
from optparse import OptionParser
import textwrap
from types import MethodType
from digress import __version__ as version
def dispatchable(func):
"""
Mark a method as dispatchable.
"""
func.digress_dispatchable = True
return func
class Dispatcher(object):
"""
Dispatcher for CLI commands.
"""
def __init__(self, fixture):
self.fixture = fixture
fixture.dispatcher = self
def _monkey_print_help(self, optparse, *args, **kwargs):
# monkey patches OptionParser._print_help
OptionParser.print_help(optparse, *args, **kwargs)
print >>sys.stderr, "\nAvailable commands:"
maxlen = max([ len(command_name) for command_name in self.commands ])
descwidth = 80 - maxlen - 4
for command_name, command_meth in self.commands.iteritems():
print >>sys.stderr, " %s %s\n" % (
command_name.ljust(maxlen + 1),
("\n" + (maxlen + 4) * " ").join(
textwrap.wrap(" ".join(filter(
None,
command_meth.__doc__.strip().replace("\n", " ").split(" ")
)),
descwidth
)
)
)
def _enable_flush(self):
self.fixture.flush_before = True
def _populate_parser(self):
self.commands = self._get_commands()
self.optparse = OptionParser(
usage = "usage: %prog [options] command [args]",
description = "Digress CLI frontend for %s." % self.fixture.__class__.__name__,
version = "Digress %s" % version
)
self.optparse.print_help = MethodType(self._monkey_print_help, self.optparse, OptionParser)
self.optparse.add_option(
"-f",
"--flush",
action="callback",
callback=lambda option, opt, value, parser: self._enable_flush(),
help="flush existing data for a revision before testing"
)
self.optparse.add_option(
"-c",
"--cases",
metavar="FOO,BAR",
action="callback",
dest="cases",
type=str,
callback=lambda option, opt, value, parser: self._select_cases(*value.split(",")),
help="test cases to run, run with command list to see full list"
)
def _select_cases(self, *cases):
self.fixture.cases = filter(lambda case: case.__name__ in cases, self.fixture.cases)
def _get_commands(self):
commands = {}
for name, member in inspect.getmembers(self.fixture):
if hasattr(member, "digress_dispatchable"):
commands[name] = member
return commands
def _run_command(self, name, *args):
if name not in self.commands:
print >>sys.stderr, "error: %s is not a valid command\n" % name
self.optparse.print_help()
return
command = self.commands[name]
argspec = inspect.getargspec(command)
max_arg_len = len(argspec.args) - 1
min_arg_len = max_arg_len - ((argspec.defaults is not None) and len(argspec.defaults) or 0)
if len(args) < min_arg_len:
print >>sys.stderr, "error: %s takes at least %d arguments\n" % (
name,
min_arg_len
)
print >>sys.stderr, "%s\n" % command.__doc__
self.optparse.print_help()
return
if len(args) > max_arg_len:
print >>sys.stderr, "error: %s takes at most %d arguments\n" % (
name,
max_arg_len
)
print >>sys.stderr, "%s\n" % command.__doc__
self.optparse.print_help()
return
command(*args)
def pre_dispatch(self):
pass
def dispatch(self):
self._populate_parser()
self.optparse.parse_args()
self.pre_dispatch()
args = self.optparse.parse_args()[1] # arguments may require reparsing after pre_dispatch; see test_x264.py
if len(args) == 0:
            print >>sys.stderr, "error: no command specified\n"
self.optparse.print_help()
return
command = args[0]
addenda = args[1:]
self._run_command(command, *addenda)
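# Usage sketch (editorial addition): a minimal fixture driving this
# dispatcher. The Fixture base class and the attributes referenced above
# (cases, flush_before) are assumptions about digress internals.
#
# class MyFixture(Fixture):
#     @dispatchable
#     def run(self, revision, case=None):
#         """Run the registered test cases against a revision."""
#         ...
#
# if __name__ == '__main__':
#     Dispatcher(MyFixture()).dispatch()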
| mit |
Zord13appdesa/python-for-android | python-build/python-libs/gdata/src/gdata/Crypto/PublicKey/qNEW.py | 228 | 5545 | #
# qNEW.py : The q-NEW signature algorithm.
#
# Part of the Python Cryptography Toolkit
#
# Distribute and use freely; there are no restrictions on further
# dissemination and usage except those imposed by the laws of your
# country of residence. This software is provided "as is" without
# warranty of fitness for use or suitability for any purpose, express
# or implied. Use at your own risk or not at all.
#
__revision__ = "$Id: qNEW.py,v 1.8 2003/04/04 15:13:35 akuchling Exp $"
from Crypto.PublicKey import pubkey
from Crypto.Util.number import *
from Crypto.Hash import SHA
class error (Exception):
pass
HASHBITS = 160 # Size of SHA digests
def generate(bits, randfunc, progress_func=None):
"""generate(bits:int, randfunc:callable, progress_func:callable)
Generate a qNEW key of length 'bits', using 'randfunc' to get
random data and 'progress_func', if present, to display
the progress of the key generation.
"""
obj=qNEWobj()
# Generate prime numbers p and q. q is a 160-bit prime
# number. p is another prime number (the modulus) whose bit
# size is chosen by the caller, and is generated so that p-1
# is a multiple of q.
#
# Note that only a single seed is used to
# generate p and q; if someone generates a key for you, you can
# use the seed to duplicate the key generation. This can
# protect you from someone generating values of p,q that have
# some special form that's easy to break.
if progress_func:
progress_func('p,q\n')
while (1):
obj.q = getPrime(160, randfunc)
# assert pow(2, 159L)<obj.q<pow(2, 160L)
obj.seed = S = long_to_bytes(obj.q)
C, N, V = 0, 2, {}
# Compute b and n such that bits-1 = b + n*HASHBITS
n= (bits-1) / HASHBITS
b= (bits-1) % HASHBITS ; powb=2L << b
powL1=pow(long(2), bits-1)
while C<4096:
# The V array will contain (bits-1) bits of random
# data, that are assembled to produce a candidate
# value for p.
for k in range(0, n+1):
V[k]=bytes_to_long(SHA.new(S+str(N)+str(k)).digest())
p = V[n] % powb
for k in range(n-1, -1, -1):
p= (p << long(HASHBITS) )+V[k]
p = p+powL1 # Ensure the high bit is set
# Ensure that p-1 is a multiple of q
p = p - (p % (2*obj.q)-1)
# If p is still the right size, and it's prime, we're done!
if powL1<=p and isPrime(p):
break
# Otherwise, increment the counter and try again
C, N = C+1, N+n+1
if C<4096:
break # Ended early, so exit the while loop
if progress_func:
progress_func('4096 values of p tried\n')
obj.p = p
power=(p-1)/obj.q
# Next parameter: g = h**((p-1)/q) mod p, such that h is any
# number <p-1, and g>1. g is kept; h can be discarded.
if progress_func:
progress_func('h,g\n')
while (1):
h=bytes_to_long(randfunc(bits)) % (p-1)
g=pow(h, power, p)
if 1<h<p-1 and g>1:
break
obj.g=g
# x is the private key information, and is
# just a random number between 0 and q.
# y=g**x mod p, and is part of the public information.
if progress_func:
progress_func('x,y\n')
while (1):
x=bytes_to_long(randfunc(20))
if 0 < x < obj.q:
break
obj.x, obj.y=x, pow(g, x, p)
return obj
# Construct a qNEW object
def construct(tuple):
"""construct(tuple:(long,long,long,long)|(long,long,long,long,long)
Construct a qNEW object from a 4- or 5-tuple of numbers.
"""
obj=qNEWobj()
if len(tuple) not in [4,5]:
raise error, 'argument for construct() wrong length'
for i in range(len(tuple)):
field = obj.keydata[i]
setattr(obj, field, tuple[i])
return obj
class qNEWobj(pubkey.pubkey):
keydata=['p', 'q', 'g', 'y', 'x']
def _sign(self, M, K=''):
if (self.q<=K):
raise error, 'K is greater than q'
if M<0:
raise error, 'Illegal value of M (<0)'
if M>=pow(2,161L):
raise error, 'Illegal value of M (too large)'
r=pow(self.g, K, self.p) % self.q
s=(K- (r*M*self.x % self.q)) % self.q
return (r,s)
def _verify(self, M, sig):
r, s = sig
if r<=0 or r>=self.q or s<=0 or s>=self.q:
return 0
if M<0:
raise error, 'Illegal value of M (<0)'
if M<=0 or M>=pow(2,161L):
return 0
v1 = pow(self.g, s, self.p)
v2 = pow(self.y, M*r, self.p)
v = ((v1*v2) % self.p)
v = v % self.q
if v==r:
return 1
return 0
def size(self):
"Return the maximum number of bits that can be handled by this key."
return 160
def has_private(self):
"""Return a Boolean denoting whether the object contains
private components."""
return hasattr(self, 'x')
def can_sign(self):
"""Return a Boolean value recording whether this algorithm can generate signatures."""
return 1
def can_encrypt(self):
"""Return a Boolean value recording whether this algorithm can encrypt data."""
return 0
def publickey(self):
"""Return a new key object containing only the public information."""
return construct((self.p, self.q, self.g, self.y))
object = qNEWobj
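# Usage sketch (editorial addition): key generation, then signing and
# verifying a 160-bit digest, assuming os.urandom as the randfunc. The
# 512-bit modulus is for demonstration speed only.
if __name__ == '__main__':
    import os
    key = generate(512, os.urandom)
    digest = bytes_to_long(SHA.new('message').digest())
    while 1:
        k = bytes_to_long(os.urandom(20))
        if 0 < k < key.q:
            break
    r_s = key.sign(digest, k)
    assert key.verify(digest, r_s)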
| apache-2.0 |
ujjvala-addsol/addsol_hr | openerp/addons/gamification/tests/test_challenge.py | 386 | 5133 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2013 OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.tests import common
class test_challenge(common.TransactionCase):
def setUp(self):
super(test_challenge, self).setUp()
cr, uid = self.cr, self.uid
self.data_obj = self.registry('ir.model.data')
self.user_obj = self.registry('res.users')
self.challenge_obj = self.registry('gamification.challenge')
self.line_obj = self.registry('gamification.challenge.line')
self.goal_obj = self.registry('gamification.goal')
self.badge_obj = self.registry('gamification.badge')
self.badge_user_obj = self.registry('gamification.badge.user')
self.demo_user_id = self.data_obj.get_object_reference(cr, uid, 'base', 'user_demo')[1]
self.group_user_id = self.data_obj.get_object_reference(cr, uid, 'base', 'group_user')[1]
self.challenge_base_id = self.data_obj.get_object_reference(cr, uid, 'gamification', 'challenge_base_discover')[1]
self.definition_timezone_id = self.data_obj.get_object_reference(cr, uid, 'gamification', 'definition_base_timezone')[1]
self.badge_id = self.data_obj.get_object_reference(cr, uid, 'gamification', 'badge_good_job')[1]
def test_00_join_challenge(self):
cr, uid, context = self.cr, self.uid, {}
user_ids = self.user_obj.search(cr, uid, [('groups_id', '=', self.group_user_id)])
challenge = self.challenge_obj.browse(cr, uid, self.challenge_base_id, context=context)
self.assertGreaterEqual(len(challenge.user_ids), len(user_ids), "Not enough users in base challenge")
self.user_obj.create(cr, uid, {
'name': 'R2D2',
'login': '[email protected]',
'email': '[email protected]',
'groups_id': [(6, 0, [self.group_user_id])]
}, {'no_reset_password': True})
self.challenge_obj._update_all(cr, uid, [self.challenge_base_id], context=context)
challenge = self.challenge_obj.browse(cr, uid, self.challenge_base_id, context=context)
self.assertGreaterEqual(len(challenge.user_ids), len(user_ids)+1, "These are not droids you are looking for")
def test_10_reach_challenge(self):
cr, uid, context = self.cr, self.uid, {}
self.challenge_obj.write(cr, uid, [self.challenge_base_id], {'state': 'inprogress'}, context=context)
challenge = self.challenge_obj.browse(cr, uid, self.challenge_base_id, context=context)
challenge_user_ids = [user.id for user in challenge.user_ids]
self.assertEqual(challenge.state, 'inprogress', "Challenge failed the change of state")
line_ids = self.line_obj.search(cr, uid, [('challenge_id', '=', self.challenge_base_id)], context=context)
goal_ids = self.goal_obj.search(cr, uid, [('challenge_id', '=', self.challenge_base_id), ('state', '!=', 'draft')], context=context)
self.assertEqual(len(goal_ids), len(line_ids)*len(challenge_user_ids), "Incorrect number of goals generated, should be 1 goal per user, per challenge line")
# demo user will set a timezone
self.user_obj.write(cr, uid, self.demo_user_id, {'tz': "Europe/Brussels"}, context=context)
goal_ids = self.goal_obj.search(cr, uid, [('user_id', '=', self.demo_user_id), ('definition_id', '=', self.definition_timezone_id)], context=context)
self.goal_obj.update(cr, uid, goal_ids, context=context)
reached_goal_ids = self.goal_obj.search(cr, uid, [('id', 'in', goal_ids), ('state', '=', 'reached')], context=context)
self.assertEqual(set(goal_ids), set(reached_goal_ids), "Not every goal was reached after changing timezone")
        # reward the first two users, since the admin user may also have set a timezone
self.challenge_obj.write(cr, uid, self.challenge_base_id, {'reward_first_id': self.badge_id, 'reward_second_id': self.badge_id}, context=context)
self.challenge_obj.write(cr, uid, self.challenge_base_id, {'state': 'done'}, context=context)
badge_ids = self.badge_user_obj.search(cr, uid, [('badge_id', '=', self.badge_id), ('user_id', '=', self.demo_user_id)])
self.assertGreater(len(badge_ids), 0, "Demo user has not received the badge") | agpl-3.0 |
tudorvio/nova | nova/tests/unit/keymgr/test_single_key_mgr.py | 78 | 2448 | # Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Test cases for the single key manager.
"""
import array
from nova import exception
from nova.keymgr import key
from nova.keymgr import single_key_mgr
from nova.tests.unit.keymgr import test_mock_key_mgr
class SingleKeyManagerTestCase(test_mock_key_mgr.MockKeyManagerTestCase):
def _create_key_manager(self):
return single_key_mgr.SingleKeyManager()
def setUp(self):
super(SingleKeyManagerTestCase, self).setUp()
self.key_id = '00000000-0000-0000-0000-000000000000'
encoded = array.array('B', ('0' * 64).decode('hex')).tolist()
self.key = key.SymmetricKey('AES', encoded)
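        # Descriptive note: on Python 2, ('0' * 64).decode('hex') is 32 zero
        # bytes, i.e. a 256-bit all-zero AES key; `encoded` is that key as a
        # list of ints.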
def test___init__(self):
self.assertEqual(self.key,
self.key_mgr.get_key(self.ctxt, self.key_id))
def test_create_key(self):
key_id_1 = self.key_mgr.create_key(self.ctxt)
key_id_2 = self.key_mgr.create_key(self.ctxt)
# ensure that the UUIDs are the same
self.assertEqual(key_id_1, key_id_2)
def test_create_key_with_length(self):
pass
def test_store_null_context(self):
self.assertRaises(exception.Forbidden,
self.key_mgr.store_key, None, self.key)
def test_copy_key(self):
key_id = self.key_mgr.create_key(self.ctxt)
key = self.key_mgr.get_key(self.ctxt, key_id)
copied_key_id = self.key_mgr.copy_key(self.ctxt, key_id)
copied_key = self.key_mgr.get_key(self.ctxt, copied_key_id)
self.assertEqual(key_id, copied_key_id)
self.assertEqual(key, copied_key)
def test_delete_key(self):
pass
def test_delete_unknown_key(self):
self.assertRaises(exception.KeyManagerError,
self.key_mgr.delete_key, self.ctxt, None)
| apache-2.0 |
tjsavage/sfcsdatabase | django/conf/locale/en_GB/formats.py | 80 | 1770 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
DATE_FORMAT = 'N j, Y' # 'Oct. 25, 2006'
TIME_FORMAT = 'P' # '2:30 pm'
DATETIME_FORMAT = 'N j, Y, P' # 'Oct. 25, 2006, 2:30 pm'
YEAR_MONTH_FORMAT = 'F Y' # 'October 2006'
MONTH_DAY_FORMAT = 'F j' # 'October 25'
SHORT_DATE_FORMAT = 'd/m/Y' # '25/10/2006'
SHORT_DATETIME_FORMAT = 'd/m/Y P' # '25/10/2006 2:30 pm'
FIRST_DAY_OF_WEEK = 0 # Sunday
DATE_INPUT_FORMATS = (
'%d/%m/%Y', '%d/%m/%y', # '25/10/2006', '25/10/06'
'%Y-%m-%d', # '2006-10-25'
# '%b %d %Y', '%b %d, %Y', # 'Oct 25 2006', 'Oct 25, 2006'
# '%d %b %Y', '%d %b, %Y', # '25 Oct 2006', '25 Oct, 2006'
# '%B %d %Y', '%B %d, %Y', # 'October 25 2006', 'October 25, 2006'
# '%d %B %Y', '%d %B, %Y', # '25 October 2006', '25 October, 2006'
)
TIME_INPUT_FORMATS = (
'%H:%M:%S', # '14:30:59'
'%H:%M', # '14:30'
)
DATETIME_INPUT_FORMATS = (
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%d/%m/%Y %H:%M:%S', # '25/10/2006 14:30:59'
'%d/%m/%Y %H:%M', # '25/10/2006 14:30'
'%d/%m/%Y', # '25/10/2006'
'%d/%m/%y %H:%M:%S', # '25/10/06 14:30:59'
'%d/%m/%y %H:%M', # '25/10/06 14:30'
'%d/%m/%y', # '25/10/06'
)
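# Illustrative note (hedged): Django form fields try these patterns in order,
# so both of the following parse to datetime(2006, 10, 25, 14, 30):
#   '2006-10-25 14:30'   (matches '%Y-%m-%d %H:%M')
#   '25/10/2006 14:30'   (matches '%d/%m/%Y %H:%M')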
DECIMAL_SEPARATOR = '.'
THOUSAND_SEPARATOR = ','
NUMBER_GROUPING = 3
| bsd-3-clause |
yg257/Pangea | templates/root/ec2/lib/boto-2.34.0/boto/ec2/autoscale/activity.py | 152 | 3058 | # Copyright (c) 2009-2011 Reza Lotun http://reza.lotun.name/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from datetime import datetime
class Activity(object):
def __init__(self, connection=None):
self.connection = connection
self.start_time = None
self.end_time = None
self.activity_id = None
self.progress = None
self.status_code = None
self.cause = None
self.description = None
self.status_message = None
self.group_name = None
def __repr__(self):
        return 'Activity<%s>: For group:%s, status:%s, cause:%s' % (
            self.activity_id, self.group_name, self.status_message, self.cause)
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'ActivityId':
self.activity_id = value
elif name == 'AutoScalingGroupName':
self.group_name = value
elif name == 'StartTime':
try:
self.start_time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%S.%fZ')
except ValueError:
self.start_time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ')
elif name == 'EndTime':
try:
self.end_time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%S.%fZ')
except ValueError:
self.end_time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ')
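        # Illustrative inputs (hypothetical values) accepted by the two
        # patterns above:
        #   '2013-01-01T12:00:00.000Z' -> parsed with the '%f' pattern
        #   '2013-01-01T12:00:00Z'     -> parsed by the fallback pattern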
elif name == 'Progress':
self.progress = value
elif name == 'Cause':
self.cause = value
elif name == 'Description':
self.description = value
elif name == 'StatusMessage':
self.status_message = value
elif name == 'StatusCode':
self.status_code = value
else:
setattr(self, name, value)
| apache-2.0 |
kspviswa/personfinder | tools/admin.py | 19 | 5570 | # Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for administration in the interactive console."""
from model import *
from utils import *
import logging
import pickle
class Mapper(object):
# Subclasses should replace this with a model class (eg, model.Person).
KIND = None
# Subclasses can replace this with a list of (property, value) tuples
# to filter by.
FILTERS = []
def map(self, entity):
"""Updates a single entity.
Implementers should return a tuple containing two iterables
(to_update, to_delete)."""
return ([], [])
def get_query(self):
"""Returns a query over the specified kind, with any appropriate
filters applied."""
q = self.KIND.all()
for prop, value in self.FILTERS:
q.filter("%s =" % prop, value)
q.order("__key__")
return q
def run(self, batch_size=100):
"""Executes the map procedure over all matching entities."""
q = self.get_query()
entities = q.fetch(batch_size)
while entities:
to_put = []
to_delete = []
for entity in entities:
map_updates, map_deletes = self.map(entity)
to_put.extend(map_updates)
to_delete.extend(map_deletes)
if to_put:
db.put(to_put)
logging.info('entities written: %d' % len(to_put))
if to_delete:
db.delete(to_delete)
logging.info('entities deleted: %d' % len(to_delete))
q = self.get_query()
q.filter("__key__ >", entities[-1].key())
entities = q.fetch(batch_size)
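# Usage sketch (illustrative; NoteToucher is a hypothetical subclass):
#
#   class NoteToucher(Mapper):
#       KIND = Note
#       def map(self, entity):
#           return [entity], []   # re-put every Note unchanged
#
#   NoteToucher().run(batch_size=50)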
class Reindexer(Mapper):
KIND = Person
def map(self, entity):
# This updates both old and new index and we need it for now,
# as first stage of deployment.
entity.update_index(['old','new'])
# Use the next line to index only with new index
#indexing.update_index_properties(entity)
return [entity], []
def Person_repr(person):
return '<Person %s %r>' % (
person.record_id, person.primary_full_name)
def Note_repr(note):
return '<Note %s for %s by %r at %s>' % (
note.record_id, note.person_record_id,
note.author_name, note.entry_date)
Person.__repr__ = Person_repr
Note.__repr__ = Note_repr
def expand_id(repo, id):
id = str(id)
if '/' not in id:
id = repo + '.' + HOME_DOMAIN + '/person.' + id
return id
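# Illustrative example (hypothetical repo and id):
#   expand_id('haiti', '123') -> 'haiti.' + HOME_DOMAIN + '/person.123'
# An id already containing '/' is returned unchanged.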
def clear_found(id):
person = get_person(id)
person.found = False
db.put(person)
def get_person(repo, id):
return Person.get(repo, expand_id(repo, id))
def get_notes(repo, id):
return list(Note.all_in_repo(repo).filter(
'person_record_id =', expand_id(repo, id)))
def delete_person(person):
"""Deletes a Person, possibly leaving behind an empty placeholder."""
if person.is_original():
person.expiry_date = get_utcnow()
person.put_expiry_flags()
person.wipe_contents()
else:
person.delete_related_entities(delete_self=True)
def delete_repo(repo):
"""Deletes a Repo and associated Person, Note, Authorization, Subscription
(but not Counter, ApiActionLog, or UserAgentLog) entities."""
for person in Person.all_in_repo(repo, filter_expired=False):
delete_person(person)
entities = [Repo.get_by_key_name(repo)]
for cls in [Person, Note, Authorization, Subscription]:
entities += list(cls.all().filter('repo =', repo))
min_key = db.Key.from_path('ConfigEntry', repo + ':')
max_key = db.Key.from_path('ConfigEntry', repo + ';')
    entities += list(config.ConfigEntry.all()
                     .filter('__key__ >', min_key)
                     .filter('__key__ <', max_key))
db.delete(entities)
def get_all_resources():
"""Gets all the Resource entities and returns a dictionary of the contents.
The resulting dictionary has the structure: {
<bundle_name>: {
'created': <bundle_created_datetime>,
'resources': {
<resource_name>: {
'cache_seconds': <cache_seconds>
'content': <content_string>
'last_modified': <last_modified_datetime>
}
}
}
"""
import resources
bundle_dicts = {}
for b in resources.ResourceBundle.all():
resource_dicts = {}
for r in resources.Resource.all().ancestor(b):
resource_dicts[r.key().name()] = {
'cache_seconds': r.cache_seconds,
'content': r.content,
'last_modified': r.last_modified
}
bundle_dicts[b.key().name()] = {
'created': b.created,
'resources': resource_dicts
}
return bundle_dicts
def download_resources(filename):
"""Downloads all the Resource data into a backup file in pickle format."""
file = open(filename, 'w')
pickle.dump(get_all_resources(), file)
file.close()
| apache-2.0 |
newville/scikit-image | doc/examples/plot_rank_mean.py | 17 | 1499 | """
============
Mean filters
============
This example compares the following mean filters of the rank filter package:
* **local mean**: all pixels belonging to the structuring element to compute
average gray level.
* **percentile mean**: only use values between percentiles p0 and p1
(here 10% and 90%).
* **bilateral mean**: only use pixels of the structuring element having a gray
level situated inside g-s0 and g+s1 (here g-500 and g+500)
Percentile and usual mean give similar results here: these filters smooth the
complete image (background and details). Bilateral mean exhibits a high
filtering rate for continuous areas (i.e. background) while higher image
frequencies remain untouched.
"""
import numpy as np
import matplotlib.pyplot as plt
from skimage import data
from skimage.morphology import disk
from skimage.filters import rank
image = (data.coins()).astype(np.uint16) * 16
selem = disk(20)
percentile_result = rank.mean_percentile(image, selem=selem, p0=.1, p1=.9)
bilateral_result = rank.mean_bilateral(image, selem=selem, s0=500, s1=500)
normal_result = rank.mean(image, selem=selem)
fig, axes = plt.subplots(nrows=3, figsize=(8, 10))
ax0, ax1, ax2 = axes
ax0.imshow(np.hstack((image, percentile_result)))
ax0.set_title('Percentile mean')
ax0.axis('off')
ax1.imshow(np.hstack((image, bilateral_result)))
ax1.set_title('Bilateral mean')
ax1.axis('off')
ax2.imshow(np.hstack((image, normal_result)))
ax2.set_title('Local mean')
ax2.axis('off')
plt.show()
| bsd-3-clause |
Arcanemagus/SickRage | lib/hachoir_parser/image/tga.py | 95 | 2911 | """
Truevision Targa Graphic (TGA) picture parser.
Author: Victor Stinner
Creation: 18 december 2006
"""
from hachoir_parser import Parser
from hachoir_core.field import FieldSet, UInt8, UInt16, Enum, RawBytes
from hachoir_core.endian import LITTLE_ENDIAN
from hachoir_parser.image.common import PaletteRGB
class Line(FieldSet):
def __init__(self, *args):
FieldSet.__init__(self, *args)
self._size = self["/width"].value * self["/bpp"].value
def createFields(self):
for x in xrange(self["/width"].value):
yield UInt8(self, "pixel[]")
class Pixels(FieldSet):
def __init__(self, *args):
FieldSet.__init__(self, *args)
self._size = self["/width"].value * self["/height"].value * self["/bpp"].value
def createFields(self):
if self["/options"].value == 0:
            RANGE = xrange(self["/height"].value - 1, -1, -1)
else:
RANGE = xrange(self["/height"].value)
for y in RANGE:
yield Line(self, "line[%u]" % y)
class TargaFile(Parser):
PARSER_TAGS = {
"id": "targa",
"category": "image",
"file_ext": ("tga",),
"mime": (u"image/targa", u"image/tga", u"image/x-tga"),
"min_size": 18*8,
"description": u"Truevision Targa Graphic (TGA)"
}
CODEC_NAME = {
1: u"8-bit uncompressed",
2: u"24-bit uncompressed",
9: u"8-bit RLE",
10: u"24-bit RLE",
}
endian = LITTLE_ENDIAN
def validate(self):
if self["version"].value != 1:
return "Unknown version"
if self["codec"].value not in self.CODEC_NAME:
return "Unknown codec"
if self["x_min"].value != 0 or self["y_min"].value != 0:
return "(x_min, y_min) is not (0,0)"
if self["bpp"].value not in (8, 24):
return "Unknown bits/pixel value"
return True
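    # Usage sketch (illustrative, hypothetical file name):
    #   from hachoir_core.stream import FileInputStream
    #   parser = TargaFile(FileInputStream(u"image.tga"))
    #   width, height = parser["width"].value, parser["height"].value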
def createFields(self):
yield UInt8(self, "hdr_size", "Header size in bytes")
yield UInt8(self, "version", "Targa version (always one)")
yield Enum(UInt8(self, "codec", "Pixels encoding"), self.CODEC_NAME)
yield UInt16(self, "palette_ofs", "Palette absolute file offset")
yield UInt16(self, "nb_color", "Number of color")
yield UInt8(self, "color_map_size", "Color map entry size")
yield UInt16(self, "x_min")
yield UInt16(self, "y_min")
yield UInt16(self, "width")
yield UInt16(self, "height")
yield UInt8(self, "bpp", "Bits per pixel")
yield UInt8(self, "options", "Options (0: vertical mirror)")
if self["bpp"].value == 8:
yield PaletteRGB(self, "palette", 256)
if self["codec"].value == 1:
yield Pixels(self, "pixels")
else:
size = (self.size - self.current_size) // 8
if size:
yield RawBytes(self, "raw_pixels", size)
| gpl-3.0 |
lsaffre/voga | lino_voga/lib/voga/fixtures/demo.py | 2 | 1358 | # -*- coding: UTF-8 -*-
# Copyright 2013-2017 Rumma & Ko Ltd
# License: BSD (see file COPYING for details)
from __future__ import unicode_literals
from django.conf import settings
from lino.api import dd, rt
def objects():
Person = rt.models.contacts.Person
Teacher = rt.models.courses.Teacher
User = rt.models.users.User
from lino.modlib.users.choicelists import UserTypes
Place = rt.models.countries.Place
eupen = Place.objects.get(name__exact='Eupen')
person = Person(first_name="Marianne", last_name="Martin",
email=settings.SITE.demo_email,
city=eupen, gender=dd.Genders.female)
yield person
yield User(username=person.first_name.lower(),
partner=person, user_type='100')
person = Person(first_name="Monique", last_name="Mommer",
email=settings.SITE.demo_email,
city=eupen, gender=dd.Genders.female)
yield person
yield User(username=person.first_name.lower(),
partner=person, user_type='200')
person = Teacher(first_name="Tom", last_name="Thess",
email=settings.SITE.demo_email,
city=eupen, gender=dd.Genders.male)
yield person
yield User(username=person.first_name.lower(),
partner=person, user_type=UserTypes.teacher)
| agpl-3.0 |
ldieselUT/Kruus-robtech | install/_setup_util.py | 1 | 12413 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Software License Agreement (BSD License)
#
# Copyright (c) 2012, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
'''This file generates shell code for the setup.SHELL scripts to set environment variables'''
from __future__ import print_function
import argparse
import copy
import errno
import os
import platform
import sys
CATKIN_MARKER_FILE = '.catkin'
system = platform.system()
IS_DARWIN = (system == 'Darwin')
IS_WINDOWS = (system == 'Windows')
# subfolder of workspace prepended to CMAKE_PREFIX_PATH
ENV_VAR_SUBFOLDERS = {
'CMAKE_PREFIX_PATH': '',
'CPATH': 'include',
'LD_LIBRARY_PATH' if not IS_DARWIN else 'DYLD_LIBRARY_PATH': ['lib', os.path.join('lib', 'x86_64-linux-gnu')],
'PATH': 'bin',
'PKG_CONFIG_PATH': [os.path.join('lib', 'pkgconfig'), os.path.join('lib', 'x86_64-linux-gnu', 'pkgconfig')],
'PYTHONPATH': 'lib/python2.7/dist-packages',
}
def rollback_env_variables(environ, env_var_subfolders):
'''
Generate shell code to reset environment variables
by unrolling modifications based on all workspaces in CMAKE_PREFIX_PATH.
This does not cover modifications performed by environment hooks.
'''
lines = []
unmodified_environ = copy.copy(environ)
for key in sorted(env_var_subfolders.keys()):
subfolders = env_var_subfolders[key]
if not isinstance(subfolders, list):
subfolders = [subfolders]
for subfolder in subfolders:
value = _rollback_env_variable(unmodified_environ, key, subfolder)
if value is not None:
environ[key] = value
lines.append(assignment(key, value))
if lines:
lines.insert(0, comment('reset environment variables by unrolling modifications based on all workspaces in CMAKE_PREFIX_PATH'))
return lines
def _rollback_env_variable(environ, name, subfolder):
'''
For each catkin workspace in CMAKE_PREFIX_PATH remove the first entry from env[NAME] matching workspace + subfolder.
:param subfolder: str '' or subfoldername that may start with '/'
:returns: the updated value of the environment variable.
'''
value = environ[name] if name in environ else ''
env_paths = [path for path in value.split(os.pathsep) if path]
value_modified = False
if subfolder:
if subfolder.startswith(os.path.sep) or (os.path.altsep and subfolder.startswith(os.path.altsep)):
subfolder = subfolder[1:]
if subfolder.endswith(os.path.sep) or (os.path.altsep and subfolder.endswith(os.path.altsep)):
subfolder = subfolder[:-1]
for ws_path in _get_workspaces(environ, include_fuerte=True, include_non_existing=True):
path_to_find = os.path.join(ws_path, subfolder) if subfolder else ws_path
path_to_remove = None
for env_path in env_paths:
env_path_clean = env_path[:-1] if env_path and env_path[-1] in [os.path.sep, os.path.altsep] else env_path
if env_path_clean == path_to_find:
path_to_remove = env_path
break
if path_to_remove:
env_paths.remove(path_to_remove)
value_modified = True
new_value = os.pathsep.join(env_paths)
return new_value if value_modified else None
def _get_workspaces(environ, include_fuerte=False, include_non_existing=False):
'''
Based on CMAKE_PREFIX_PATH return all catkin workspaces.
:param include_fuerte: The flag if paths starting with '/opt/ros/fuerte' should be considered workspaces, ``bool``
'''
# get all cmake prefix paths
env_name = 'CMAKE_PREFIX_PATH'
value = environ[env_name] if env_name in environ else ''
paths = [path for path in value.split(os.pathsep) if path]
# remove non-workspace paths
workspaces = [path for path in paths if os.path.isfile(os.path.join(path, CATKIN_MARKER_FILE)) or (include_fuerte and path.startswith('/opt/ros/fuerte')) or (include_non_existing and not os.path.exists(path))]
return workspaces
def prepend_env_variables(environ, env_var_subfolders, workspaces):
'''
Generate shell code to prepend environment variables
    for all workspaces.
'''
lines = []
lines.append(comment('prepend folders of workspaces to environment variables'))
paths = [path for path in workspaces.split(os.pathsep) if path]
prefix = _prefix_env_variable(environ, 'CMAKE_PREFIX_PATH', paths, '')
lines.append(prepend(environ, 'CMAKE_PREFIX_PATH', prefix))
for key in sorted([key for key in env_var_subfolders.keys() if key != 'CMAKE_PREFIX_PATH']):
subfolder = env_var_subfolders[key]
prefix = _prefix_env_variable(environ, key, paths, subfolder)
lines.append(prepend(environ, key, prefix))
return lines
def _prefix_env_variable(environ, name, paths, subfolders):
'''
    Return the prefix to prepend to the environment variable NAME, adding any path in `paths` without creating duplicate or empty items.
'''
value = environ[name] if name in environ else ''
environ_paths = [path for path in value.split(os.pathsep) if path]
checked_paths = []
for path in paths:
if not isinstance(subfolders, list):
subfolders = [subfolders]
for subfolder in subfolders:
path_tmp = path
if subfolder:
path_tmp = os.path.join(path_tmp, subfolder)
# skip nonexistent paths
if not os.path.exists(path_tmp):
continue
# exclude any path already in env and any path we already added
if path_tmp not in environ_paths and path_tmp not in checked_paths:
checked_paths.append(path_tmp)
prefix_str = os.pathsep.join(checked_paths)
if prefix_str != '' and environ_paths:
prefix_str += os.pathsep
return prefix_str
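# Illustrative example (hypothetical workspace paths): with PATH already
# containing '/usr/bin', prefixing the 'bin' folders of two workspaces yields
#   '/ws_a/install/bin:/ws_b/install/bin:'
# (the trailing separator is added because the variable already had entries).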
def assignment(key, value):
if not IS_WINDOWS:
return 'export %s="%s"' % (key, value)
else:
return 'set %s=%s' % (key, value)
def comment(msg):
if not IS_WINDOWS:
return '# %s' % msg
else:
return 'REM %s' % msg
def prepend(environ, key, prefix):
if key not in environ or not environ[key]:
return assignment(key, prefix)
if not IS_WINDOWS:
return 'export %s="%s$%s"' % (key, prefix, key)
else:
return 'set %s=%s%%%s%%' % (key, prefix, key)
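# Illustrative outputs on POSIX shells (Windows uses ``set`` instead):
#   assignment('PATH', '/opt/ros/indigo/bin') -> 'export PATH="/opt/ros/indigo/bin"'
#   prepend(environ, 'PATH', '/ws/install/bin:') -> 'export PATH="/ws/install/bin:$PATH"'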
def find_env_hooks(environ, cmake_prefix_path):
'''
Generate shell code with found environment hooks
    for all workspaces.
'''
lines = []
lines.append(comment('found environment hooks in workspaces'))
generic_env_hooks = []
generic_env_hooks_workspace = []
specific_env_hooks = []
specific_env_hooks_workspace = []
generic_env_hooks_by_filename = {}
specific_env_hooks_by_filename = {}
generic_env_hook_ext = 'bat' if IS_WINDOWS else 'sh'
specific_env_hook_ext = environ['CATKIN_SHELL'] if not IS_WINDOWS and 'CATKIN_SHELL' in environ and environ['CATKIN_SHELL'] else None
# remove non-workspace paths
workspaces = [path for path in cmake_prefix_path.split(os.pathsep) if path and os.path.isfile(os.path.join(path, CATKIN_MARKER_FILE))]
for workspace in reversed(workspaces):
env_hook_dir = os.path.join(workspace, 'etc', 'catkin', 'profile.d')
if os.path.isdir(env_hook_dir):
for filename in sorted(os.listdir(env_hook_dir)):
if filename.endswith('.%s' % generic_env_hook_ext):
# remove previous env hook with same name if present
if filename in generic_env_hooks_by_filename:
i = generic_env_hooks.index(generic_env_hooks_by_filename[filename])
generic_env_hooks.pop(i)
generic_env_hooks_workspace.pop(i)
# append env hook
generic_env_hooks.append(os.path.join(env_hook_dir, filename))
generic_env_hooks_workspace.append(workspace)
generic_env_hooks_by_filename[filename] = generic_env_hooks[-1]
elif specific_env_hook_ext is not None and filename.endswith('.%s' % specific_env_hook_ext):
# remove previous env hook with same name if present
if filename in specific_env_hooks_by_filename:
i = specific_env_hooks.index(specific_env_hooks_by_filename[filename])
specific_env_hooks.pop(i)
specific_env_hooks_workspace.pop(i)
# append env hook
specific_env_hooks.append(os.path.join(env_hook_dir, filename))
specific_env_hooks_workspace.append(workspace)
specific_env_hooks_by_filename[filename] = specific_env_hooks[-1]
env_hooks = generic_env_hooks + specific_env_hooks
env_hooks_workspace = generic_env_hooks_workspace + specific_env_hooks_workspace
count = len(env_hooks)
lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_COUNT', count))
for i in range(count):
lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_%d' % i, env_hooks[i]))
lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_%d_WORKSPACE' % i, env_hooks_workspace[i]))
return lines
def _parse_arguments(args=None):
parser = argparse.ArgumentParser(description='Generates code blocks for the setup.SHELL script.')
parser.add_argument('--extend', action='store_true', help='Skip unsetting previous environment variables to extend context')
return parser.parse_known_args(args=args)[0]
if __name__ == '__main__':
try:
try:
args = _parse_arguments()
except Exception as e:
print(e, file=sys.stderr)
sys.exit(1)
# environment at generation time
CMAKE_PREFIX_PATH = '/home/tudeng/Kruus-robtech/install;/opt/ros/indigo'.split(';')
# prepend current workspace if not already part of CPP
base_path = os.path.dirname(__file__)
if base_path not in CMAKE_PREFIX_PATH:
CMAKE_PREFIX_PATH.insert(0, base_path)
CMAKE_PREFIX_PATH = os.pathsep.join(CMAKE_PREFIX_PATH)
environ = dict(os.environ)
lines = []
if not args.extend:
lines += rollback_env_variables(environ, ENV_VAR_SUBFOLDERS)
lines += prepend_env_variables(environ, ENV_VAR_SUBFOLDERS, CMAKE_PREFIX_PATH)
lines += find_env_hooks(environ, CMAKE_PREFIX_PATH)
print('\n'.join(lines))
# need to explicitly flush the output
sys.stdout.flush()
except IOError as e:
# and catch potential "broken pipe" if stdout is not writable
# which can happen when piping the output to a file but the disk is full
if e.errno == errno.EPIPE:
print(e, file=sys.stderr)
sys.exit(2)
raise
sys.exit(0)
| mit |
odubno/microblog | flask/lib/python2.7/site-packages/jinja2/_compat.py | 638 | 4042 | # -*- coding: utf-8 -*-
"""
jinja2._compat
~~~~~~~~~~~~~~
Some py2/py3 compatibility support based on a stripped down
version of six so we don't have to depend on a specific version
of it.
:copyright: Copyright 2013 by the Jinja team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import sys
PY2 = sys.version_info[0] == 2
PYPY = hasattr(sys, 'pypy_translation_info')
_identity = lambda x: x
if not PY2:
unichr = chr
range_type = range
text_type = str
string_types = (str,)
iterkeys = lambda d: iter(d.keys())
itervalues = lambda d: iter(d.values())
iteritems = lambda d: iter(d.items())
import pickle
from io import BytesIO, StringIO
NativeStringIO = StringIO
def reraise(tp, value, tb=None):
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
ifilter = filter
imap = map
izip = zip
intern = sys.intern
implements_iterator = _identity
implements_to_string = _identity
encode_filename = _identity
get_next = lambda x: x.__next__
else:
unichr = unichr
text_type = unicode
range_type = xrange
string_types = (str, unicode)
iterkeys = lambda d: d.iterkeys()
itervalues = lambda d: d.itervalues()
iteritems = lambda d: d.iteritems()
import cPickle as pickle
from cStringIO import StringIO as BytesIO, StringIO
NativeStringIO = BytesIO
exec('def reraise(tp, value, tb=None):\n raise tp, value, tb')
from itertools import imap, izip, ifilter
intern = intern
def implements_iterator(cls):
cls.next = cls.__next__
del cls.__next__
return cls
def implements_to_string(cls):
cls.__unicode__ = cls.__str__
cls.__str__ = lambda x: x.__unicode__().encode('utf-8')
return cls
get_next = lambda x: x.next
def encode_filename(filename):
if isinstance(filename, unicode):
return filename.encode('utf-8')
return filename
try:
next = next
except NameError:
def next(it):
return it.next()
def with_metaclass(meta, *bases):
# This requires a bit of explanation: the basic idea is to make a
    # dummy metaclass for one level of class instantiation that replaces
# itself with the actual metaclass. Because of internal type checks
# we also need to make sure that we downgrade the custom metaclass
# for one level to something closer to type (that's why __call__ and
# __init__ comes back from type etc.).
#
# This has the advantage over six.with_metaclass in that it does not
# introduce dummy classes into the final MRO.
class metaclass(meta):
__call__ = type.__call__
__init__ = type.__init__
def __new__(cls, name, this_bases, d):
if this_bases is None:
return type.__new__(cls, name, (), d)
return meta(name, bases, d)
return metaclass('temporary_class', None, {})
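# Usage sketch (illustrative; SomeMeta is a hypothetical metaclass): works on
# both Python 2 and 3 without leaving the temporary class in the final MRO:
#
#   class MyClass(with_metaclass(SomeMeta, object)):
#       pass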
try:
from collections import Mapping as mapping_types
except ImportError:
import UserDict
mapping_types = (UserDict.UserDict, UserDict.DictMixin, dict)
# Common types. These do exist in the special types module too, which,
# however, is not available in IronPython out of the box. Also, this way we
# don't have to deal with implementation-specific stuff here.
class _C(object):
def method(self): pass
def _func():
yield None
function_type = type(_func)
generator_type = type(_func())
method_type = type(_C().method)
code_type = type(_C.method.__code__)
try:
raise TypeError()
except TypeError:
_tb = sys.exc_info()[2]
traceback_type = type(_tb)
frame_type = type(_tb.tb_frame)
try:
from urllib.parse import quote_from_bytes as url_quote
except ImportError:
from urllib import quote as url_quote
try:
from thread import allocate_lock
except ImportError:
try:
from threading import Lock as allocate_lock
except ImportError:
from dummy_thread import allocate_lock
| bsd-3-clause |
signal18/replication-manager | share/opensvc/compliance/com.replication-manager/sysctl.py | 2 | 8354 | #!/usr/bin/env python
data = {
"default_prefix": "OSVC_COMP_SYSCTL_",
"example_value": """
{
"key": "vm.lowmem_reserve_ratio",
"index": 1,
"op": ">",
"value": 256
}
""",
"description": """* Verify a linux kernel parameter value is on target
* Live parameter value (sysctl executable)
* Persistent parameter value (/etc/sysctl.conf)
""",
"form_definition": """
Desc: |
  A rule defining a list of Linux kernel parameters to be set in /etc/sysctl.conf. Current values can be checked as strictly equal to, or greater/less than, their target value. Each field in a vectored value can be tuned independently using the index key.
Css: comp48
Outputs:
-
Dest: compliance variable
Type: json
Format: list of dict
Class: sysctl
Inputs:
-
Id: key
Label: Key
DisplayModeLabel: key
LabelCss: action16
Mandatory: Yes
Type: string
Help: The /etc/sysctl.conf parameter to check.
-
Id: index
Label: Index
DisplayModeLabel: idx
LabelCss: action16
Mandatory: Yes
Default: 0
Type: integer
    Help: The zero-based index of the field to check within a vectored parameter value.
-
Id: op
Label: Comparison operator
DisplayModeLabel: op
LabelCss: action16
Mandatory: Yes
Type: string
Default: "="
Candidates:
- "="
- ">"
- ">="
- "<"
- "<="
Help: The comparison operator to use to check the parameter current value.
-
Id: value
Label: Value
DisplayModeLabel: value
LabelCss: action16
Mandatory: Yes
Type: string or integer
Help: The /etc/sysctl.conf parameter target value.
""",
}
import os
import sys
import json
import pwd
from subprocess import *
sys.path.append(os.path.dirname(__file__))
from comp import *
class Sysctl(CompObject):
def __init__(self, prefix=None):
CompObject.__init__(self, prefix=prefix, data=data)
def init(self):
if os.uname()[0] != "Linux":
raise NotApplicable()
self.need_reload = False
self.cf = os.path.join(os.sep, "etc", "sysctl.conf")
if not os.path.exists(self.cf):
perror(self.cf, 'does not exist')
raise NotApplicable()
self.keys = []
self.cache = None
self.keys = self.get_rules()
if len(self.keys) == 0:
raise NotApplicable()
self.convert_keys()
def fixable(self):
return RET_OK
def parse_val(self, val):
val = list(map(lambda x: x.strip(), val.strip().split()))
for i, e in enumerate(val):
try:
val[i] = int(e)
except:
pass
return val
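    # Illustrative examples (hypothetical inputs):
    #   parse_val("256 256 32") -> [256, 256, 32]
    #   parse_val("1 one")      -> [1, 'one']   (non-numeric fields stay str)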
def get_keys(self):
with open(self.cf, 'r') as f:
buff = f.read()
if self.cache is None:
self.cache = {}
for line in buff.splitlines():
line = line.strip()
if line.startswith('#'):
continue
l = line.split('=')
if len(l) != 2:
continue
key = l[0].strip()
val = self.parse_val(l[1])
self.cache[key] = val
def get_live_key(self, key):
p = Popen(['sysctl', key], stdout=PIPE, stderr=PIPE)
out, err = p.communicate()
if p.returncode != 0:
return None
l = bdecode(out).split('=')
if len(l) != 2:
return None
val = self.parse_val(l[1])
return val
def get_key(self, key):
if self.cache is None:
self.get_keys()
if key not in self.cache:
return None
return self.cache[key]
def fix_key(self, key):
done = False
target = key['value']
index = key['index']
with open(self.cf, 'r') as f:
buff = f.read()
lines = buff.split('\n')
for i, line in enumerate(lines):
line = line.strip()
if line.startswith('#'):
continue
l = line.split('=')
if len(l) != 2:
continue
keyname = l[0].strip()
if key['key'] != keyname:
continue
if done:
pinfo("sysctl: remove redundant key %s"%keyname)
del lines[i]
continue
val = self.parse_val(l[1])
if target == val[index]:
done = True
continue
pinfo("sysctl: set %s[%d] = %s"%(keyname, index, str(target)))
val[index] = target
lines[i] = "%s = %s"%(keyname, " ".join(map(str, val)))
done = True
if not done:
# if key is not in sysctl.conf, get the value from kernel
val = self.get_live_key(key['key'])
if val is None:
perror("key '%s' not found in live kernel parameters" % key['key'])
return RET_ERR
if target != val[index]:
val[index] = target
pinfo("sysctl: set %s = %s"%(key['key'], " ".join(map(str, val))))
lines += ["%s = %s"%(key['key'], " ".join(map(str, val)))]
try:
with open(self.cf, 'w') as f:
f.write('\n'.join(lines))
except:
perror("failed to write sysctl.conf")
return RET_ERR
return RET_OK
def convert_keys(self):
keys = []
for key in self.keys:
keyname = key['key']
value = key['value']
if type(value) == list:
if len(value) > 0 and type(value[0]) != list:
value = [value]
for i, v in enumerate(value):
keys.append({
"key": keyname,
"index": i,
"op": v[0],
"value": v[1],
})
elif 'key' in key and 'index' in key and 'op' in key and 'value' in key:
keys.append(key)
self.keys = keys
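    # Illustrative example (hypothetical rule): a vectored rule such as
    #   {"key": "vm.lowmem_reserve_ratio", "value": [[">", 256], ["=", 32]]}
    # is expanded into one flat entry per index:
    #   {"key": ..., "index": 0, "op": ">", "value": 256}
    #   {"key": ..., "index": 1, "op": "=", "value": 32}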
def check_key(self, key, verbose=False):
r = RET_OK
keyname = key['key']
target = key['value']
op = key['op']
i = key['index']
current_value = self.get_key(keyname)
current_live_value = self.get_live_key(keyname)
if current_value is None:
if verbose:
perror("key '%s' not found in sysctl.conf"%keyname)
return RET_ERR
if op == "=" and str(current_value[i]) != str(target):
if verbose:
perror("sysctl err: %s[%d] = %s, target: %s"%(keyname, i, str(current_value[i]), str(target)))
r |= RET_ERR
elif op == ">=" and type(target) == int and current_value[i] < target:
if verbose:
perror("sysctl err: %s[%d] = %s, target: >= %s"%(keyname, i, str(current_value[i]), str(target)))
r |= RET_ERR
elif op == "<=" and type(target) == int and current_value[i] > target:
if verbose:
perror("sysctl err: %s[%d] = %s, target: <= %s"%(keyname, i, str(current_value[i]), str(target)))
r |= RET_ERR
else:
if verbose:
pinfo("sysctl ok: %s[%d] = %s, on target"%(keyname, i, str(current_value[i])))
if r == RET_OK and current_live_value is not None and current_value != current_live_value:
if verbose:
perror("sysctl err: %s on target in sysctl.conf but kernel value is different"%(keyname))
self.need_reload = True
r |= RET_ERR
return r
def check(self):
r = 0
for key in self.keys:
r |= self.check_key(key, verbose=True)
return r
def reload_sysctl(self):
cmd = ['sysctl', '-e', '-p']
pinfo("sysctl:", " ".join(cmd))
p = Popen(cmd, stdout=PIPE, stderr=PIPE)
p.communicate()
if p.returncode != 0:
perror("reload failed")
return RET_ERR
return RET_OK
def fix(self):
r = 0
for key in self.keys:
if self.check_key(key, verbose=False) == RET_ERR:
self.need_reload = True
r |= self.fix_key(key)
if self.need_reload:
r |= self.reload_sysctl()
return r
if __name__ == "__main__":
main(Sysctl)
| gpl-3.0 |
nikolas/lettuce | tests/integration/lib/Django-1.3/tests/regressiontests/model_inheritance_regress/models.py | 75 | 4389 | import datetime
from django.db import models
class Place(models.Model):
name = models.CharField(max_length=50)
address = models.CharField(max_length=80)
class Meta:
ordering = ('name',)
def __unicode__(self):
return u"%s the place" % self.name
class Restaurant(Place):
serves_hot_dogs = models.BooleanField()
serves_pizza = models.BooleanField()
def __unicode__(self):
return u"%s the restaurant" % self.name
class ItalianRestaurant(Restaurant):
serves_gnocchi = models.BooleanField()
def __unicode__(self):
return u"%s the italian restaurant" % self.name
class ParkingLot(Place):
# An explicit link to the parent (we can control the attribute name).
parent = models.OneToOneField(Place, primary_key=True, parent_link=True)
capacity = models.IntegerField()
def __unicode__(self):
return u"%s the parking lot" % self.name
class ParkingLot2(Place):
# In lieu of any other connector, an existing OneToOneField will be
# promoted to the primary key.
parent = models.OneToOneField(Place)
class ParkingLot3(Place):
# The parent_link connector need not be the pk on the model.
primary_key = models.AutoField(primary_key=True)
parent = models.OneToOneField(Place, parent_link=True)
class Supplier(models.Model):
restaurant = models.ForeignKey(Restaurant)
class Wholesaler(Supplier):
retailer = models.ForeignKey(Supplier,related_name='wholesale_supplier')
class Parent(models.Model):
created = models.DateTimeField(default=datetime.datetime.now)
class Child(Parent):
name = models.CharField(max_length=10)
class SelfRefParent(models.Model):
parent_data = models.IntegerField()
self_data = models.ForeignKey('self', null=True)
class SelfRefChild(SelfRefParent):
child_data = models.IntegerField()
class Article(models.Model):
headline = models.CharField(max_length=100)
pub_date = models.DateTimeField()
class Meta:
ordering = ('-pub_date', 'headline')
def __unicode__(self):
return self.headline
class ArticleWithAuthor(Article):
author = models.CharField(max_length=100)
class M2MBase(models.Model):
articles = models.ManyToManyField(Article)
class M2MChild(M2MBase):
name = models.CharField(max_length=50)
class Evaluation(Article):
quality = models.IntegerField()
class Meta:
abstract = True
class QualityControl(Evaluation):
assignee = models.CharField(max_length=50)
class BaseM(models.Model):
base_name = models.CharField(max_length=100)
def __unicode__(self):
return self.base_name
class DerivedM(BaseM):
customPK = models.IntegerField(primary_key=True)
derived_name = models.CharField(max_length=100)
def __unicode__(self):
return "PK = %d, base_name = %s, derived_name = %s" \
% (self.customPK, self.base_name, self.derived_name)
class AuditBase(models.Model):
planned_date = models.DateField()
class Meta:
abstract = True
verbose_name_plural = u'Audits'
class CertificationAudit(AuditBase):
class Meta(AuditBase.Meta):
abstract = True
class InternalCertificationAudit(CertificationAudit):
auditing_dept = models.CharField(max_length=20)
# Check that abstract classes don't get m2m tables autocreated.
class Person(models.Model):
name = models.CharField(max_length=100)
class Meta:
ordering = ('name',)
def __unicode__(self):
return self.name
class AbstractEvent(models.Model):
name = models.CharField(max_length=100)
attendees = models.ManyToManyField(Person, related_name="%(class)s_set")
class Meta:
abstract = True
ordering = ('name',)
def __unicode__(self):
return self.name
class BirthdayParty(AbstractEvent):
pass
class BachelorParty(AbstractEvent):
pass
class MessyBachelorParty(BachelorParty):
pass
# Check concrete -> abstract -> concrete inheritance
class SearchableLocation(models.Model):
keywords = models.CharField(max_length=256)
class Station(SearchableLocation):
name = models.CharField(max_length=128)
class Meta:
abstract = True
class BusStation(Station):
bus_routes = models.CommaSeparatedIntegerField(max_length=128)
inbound = models.BooleanField()
class TrainStation(Station):
zone = models.IntegerField()
| gpl-3.0 |
hkawasaki/kawasaki-aio8-0 | lms/djangoapps/class_dashboard/dashboard_data.py | 10 | 20700 | """
Computes the data to display on the Instructor Dashboard
"""
from util.json_request import JsonResponse
from courseware import models
from django.db.models import Count
from django.utils.translation import ugettext as _
from xmodule.course_module import CourseDescriptor
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.inheritance import own_metadata
from analytics.csvs import create_csv_response
# Used to limit the length of list displayed to the screen.
MAX_SCREEN_LIST_LENGTH = 250
def get_problem_grade_distribution(course_id):
"""
Returns the grade distribution per problem for the course
    `course_id` the ID of the course of interest
Output is a dict, where the key is the problem 'module_id' and the value is a dict with:
'max_grade' - max grade for this problem
'grade_distrib' - array of tuples (`grade`,`count`).
"""
# Aggregate query on studentmodule table for grade data for all problems in course
db_query = models.StudentModule.objects.filter(
course_id__exact=course_id,
grade__isnull=False,
module_type__exact="problem",
).values('module_state_key', 'grade', 'max_grade').annotate(count_grade=Count('grade'))
prob_grade_distrib = {}
# Loop through resultset building data for each problem
for row in db_query:
curr_problem = row['module_state_key']
# Build set of grade distributions for each problem that has student responses
if curr_problem in prob_grade_distrib:
prob_grade_distrib[curr_problem]['grade_distrib'].append((row['grade'], row['count_grade']))
if (prob_grade_distrib[curr_problem]['max_grade'] != row['max_grade']) and \
(prob_grade_distrib[curr_problem]['max_grade'] < row['max_grade']):
prob_grade_distrib[curr_problem]['max_grade'] = row['max_grade']
else:
prob_grade_distrib[curr_problem] = {
'max_grade': row['max_grade'],
'grade_distrib': [(row['grade'], row['count_grade'])]
}
return prob_grade_distrib
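# Illustrative shape of the returned dict (hypothetical module id and counts):
#   {'i4x://Org/Course/problem/abc': {
#       'max_grade': 4.0,
#       'grade_distrib': [(0.0, 12), (2.0, 5), (4.0, 30)]}}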
def get_sequential_open_distrib(course_id):
"""
Returns the number of students that opened each subsection/sequential of the course
    `course_id` the ID of the course of interest
Outputs a dict mapping the 'module_id' to the number of students that have opened that subsection/sequential.
"""
# Aggregate query on studentmodule table for "opening a subsection" data
db_query = models.StudentModule.objects.filter(
course_id__exact=course_id,
module_type__exact="sequential",
).values('module_state_key').annotate(count_sequential=Count('module_state_key'))
# Build set of "opened" data for each subsection that has "opened" data
sequential_open_distrib = {}
for row in db_query:
sequential_open_distrib[row['module_state_key']] = row['count_sequential']
return sequential_open_distrib
def get_problem_set_grade_distrib(course_id, problem_set):
"""
Returns the grade distribution for the problems specified in `problem_set`.
    `course_id` the ID of the course of interest
`problem_set` an array of strings representing problem module_id's.
Requests from the database the a count of each grade for each problem in the `problem_set`.
Returns a dict, where the key is the problem 'module_id' and the value is a dict with two parts:
'max_grade' - the maximum grade possible for the course
'grade_distrib' - array of tuples (`grade`,`count`) ordered by `grade`
"""
# Aggregate query on studentmodule table for grade data for set of problems in course
db_query = models.StudentModule.objects.filter(
course_id__exact=course_id,
grade__isnull=False,
module_type__exact="problem",
module_state_key__in=problem_set,
).values(
'module_state_key',
'grade',
'max_grade',
).annotate(count_grade=Count('grade')).order_by('module_state_key', 'grade')
prob_grade_distrib = {}
# Loop through resultset building data for each problem
for row in db_query:
if row['module_state_key'] not in prob_grade_distrib:
prob_grade_distrib[row['module_state_key']] = {
'max_grade': 0,
'grade_distrib': [],
}
curr_grade_distrib = prob_grade_distrib[row['module_state_key']]
curr_grade_distrib['grade_distrib'].append((row['grade'], row['count_grade']))
if curr_grade_distrib['max_grade'] < row['max_grade']:
curr_grade_distrib['max_grade'] = row['max_grade']
return prob_grade_distrib
def get_d3_problem_grade_distrib(course_id):
"""
    Returns problem grade distribution information for each section, with the data already in the format expected by the d3 function.
    `course_id` the ID of the course of interest
Returns an array of dicts in the order of the sections. Each dict has:
'display_name' - display name for the section
'data' - data for the d3_stacked_bar_graph function of the grade distribution for that problem
"""
prob_grade_distrib = get_problem_grade_distribution(course_id)
d3_data = []
# Retrieve course object down to problems
course = modulestore().get_instance(course_id, CourseDescriptor.id_to_location(course_id), depth=4)
# Iterate through sections, subsections, units, problems
for section in course.get_children():
curr_section = {}
curr_section['display_name'] = own_metadata(section).get('display_name', '')
data = []
c_subsection = 0
for subsection in section.get_children():
c_subsection += 1
c_unit = 0
for unit in subsection.get_children():
c_unit += 1
c_problem = 0
for child in unit.get_children():
# Student data is at the problem level
if child.location.category == 'problem':
c_problem += 1
stack_data = []
# Construct label to display for this problem
label = "P{0}.{1}.{2}".format(c_subsection, c_unit, c_problem)
# Only problems in prob_grade_distrib have had a student submission.
if child.location.url() in prob_grade_distrib:
# Get max_grade, grade_distribution for this problem
problem_info = prob_grade_distrib[child.location.url()]
# Get problem_name for tooltip
problem_name = own_metadata(child).get('display_name', '')
# Compute percent of this grade over max_grade
max_grade = float(problem_info['max_grade'])
for (grade, count_grade) in problem_info['grade_distrib']:
percent = 0.0
if max_grade > 0:
percent = (grade * 100.0) / max_grade
                                # Construct tooltip for problem in grade distribution view
tooltip = _("{label} {problem_name} - {count_grade} {students} ({percent:.0f}%: {grade:.0f}/{max_grade:.0f} {questions})").format(
label=label,
problem_name=problem_name,
count_grade=count_grade,
students=_("students"),
percent=percent,
grade=grade,
max_grade=max_grade,
questions=_("questions"),
)
# Construct data to be sent to d3
stack_data.append({
'color': percent,
'value': count_grade,
'tooltip': tooltip,
'module_url': child.location.url(),
})
problem = {
'xValue': label,
'stackData': stack_data,
}
data.append(problem)
curr_section['data'] = data
d3_data.append(curr_section)
return d3_data
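# Illustrative shape of one entry in the returned array (hypothetical values):
#   {'display_name': 'Week 1',
#    'data': [{'xValue': 'P1.1.1',
#              'stackData': [{'color': 50.0, 'value': 12,
#                             'tooltip': '...', 'module_url': '...'}]}]}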
def get_d3_sequential_open_distrib(course_id):
"""
    Returns how many students opened a sequential/subsection for each section, with the data already in the format expected by the d3 function.
    `course_id` the ID of the course of interest
Returns an array in the order of the sections and each dict has:
'display_name' - display name for the section
'data' - data for the d3_stacked_bar_graph function of how many students opened each sequential/subsection
"""
sequential_open_distrib = get_sequential_open_distrib(course_id)
d3_data = []
# Retrieve course object down to subsection
course = modulestore().get_instance(course_id, CourseDescriptor.id_to_location(course_id), depth=2)
# Iterate through sections, subsections
for section in course.get_children():
curr_section = {}
curr_section['display_name'] = own_metadata(section).get('display_name', '')
data = []
c_subsection = 0
# Construct data for each subsection to be sent to d3
for subsection in section.get_children():
c_subsection += 1
subsection_name = own_metadata(subsection).get('display_name', '')
num_students = 0
if subsection.location.url() in sequential_open_distrib:
num_students = sequential_open_distrib[subsection.location.url()]
stack_data = []
tooltip = _("{num_students} student(s) opened Subsection {subsection_num}: {subsection_name}").format(
num_students=num_students,
subsection_num=c_subsection,
subsection_name=subsection_name,
)
stack_data.append({
'color': 0,
'value': num_students,
'tooltip': tooltip,
'module_url': subsection.location.url(),
})
subsection = {
'xValue': "SS {0}".format(c_subsection),
'stackData': stack_data,
}
data.append(subsection)
curr_section['data'] = data
d3_data.append(curr_section)
return d3_data
def get_d3_section_grade_distrib(course_id, section):
"""
Returns the grade distribution for the problems in the `section` section in a format for the d3 code.
`course_id` a string that is the course's ID.
`section` an int that is a zero-based index into the course's list of sections.
Navigates to the section specified to find all the problems associated with that section and then finds the grade
    distribution for those problems. Finally returns an object formatted the way d3_stacked_bar_graph.js expects its
    data object.
If this is requested multiple times quickly for the same course, it is better to call
get_d3_problem_grade_distrib and pick out the sections of interest.
Returns an array of dicts with the following keys (taken from d3_stacked_bar_graph.js's documentation)
'xValue' - Corresponding value for the x-axis
'stackData' - Array of objects with key, value pairs that represent a bar:
'color' - Defines what "color" the bar will map to
'value' - Maps to the height of the bar, along the y-axis
'tooltip' - (Optional) Text to display on mouse hover
"""
# Retrieve course object down to problems
course = modulestore().get_instance(course_id, CourseDescriptor.id_to_location(course_id), depth=4)
problem_set = []
problem_info = {}
c_subsection = 0
for subsection in course.get_children()[section].get_children():
c_subsection += 1
c_unit = 0
for unit in subsection.get_children():
c_unit += 1
c_problem = 0
for child in unit.get_children():
if (child.location.category == 'problem'):
c_problem += 1
problem_set.append(child.location.url())
problem_info[child.location.url()] = {
'id': child.location.url(),
'x_value': "P{0}.{1}.{2}".format(c_subsection, c_unit, c_problem),
'display_name': own_metadata(child).get('display_name', ''),
}
# Retrieve grade distribution for these problems
grade_distrib = get_problem_set_grade_distrib(course_id, problem_set)
d3_data = []
# Construct data for each problem to be sent to d3
for problem in problem_set:
stack_data = []
if problem in grade_distrib: # Some problems have no data because students have not tried them yet.
max_grade = float(grade_distrib[problem]['max_grade'])
for (grade, count_grade) in grade_distrib[problem]['grade_distrib']:
percent = 0.0
if max_grade > 0:
percent = (grade * 100.0) / max_grade
                # Construct tooltip for problem in grade distribution view
tooltip = _("{problem_info_x} {problem_info_n} - {count_grade} {students} ({percent:.0f}%: {grade:.0f}/{max_grade:.0f} {questions})").format(
problem_info_x=problem_info[problem]['x_value'],
count_grade=count_grade,
students=_("students"),
percent=percent,
problem_info_n=problem_info[problem]['display_name'],
grade=grade,
max_grade=max_grade,
questions=_("questions"),
)
stack_data.append({
'color': percent,
'value': count_grade,
'tooltip': tooltip,
})
d3_data.append({
'xValue': problem_info[problem]['x_value'],
'stackData': stack_data,
})
return d3_data
def get_section_display_name(course_id):
"""
Returns an array of the display names for each section in the course.
    `course_id` the ID of the course of interest
The ith string in the array is the display name of the ith section in the course.
"""
course = modulestore().get_instance(course_id, CourseDescriptor.id_to_location(course_id), depth=4)
section_display_name = [""] * len(course.get_children())
i = 0
for section in course.get_children():
section_display_name[i] = own_metadata(section).get('display_name', '')
i += 1
return section_display_name
def get_array_section_has_problem(course_id):
"""
    Returns an array of booleans indicating whether each section contains problems.
    `course_id` the ID of the course of interest
The ith value in the array is true if the ith section in the course contains problems and false otherwise.
"""
course = modulestore().get_instance(course_id, CourseDescriptor.id_to_location(course_id), depth=4)
b_section_has_problem = [False] * len(course.get_children())
i = 0
for section in course.get_children():
for subsection in section.get_children():
for unit in subsection.get_children():
for child in unit.get_children():
if child.location.category == 'problem':
b_section_has_problem[i] = True
break # out of child loop
if b_section_has_problem[i]:
break # out of unit loop
if b_section_has_problem[i]:
break # out of subsection loop
i += 1
return b_section_has_problem
def get_students_opened_subsection(request, csv=False):
"""
Get a list of students that opened a particular subsection.
If 'csv' is False, returns a dict of student's name: username.
If 'csv' is True, returns a header array, and an array of arrays in the format:
student names, usernames for CSV download.
"""
module_id = request.GET.get('module_id')
csv = request.GET.get('csv')
# Query for "opened a subsection" students
students = models.StudentModule.objects.select_related('student').filter(
module_state_key__exact=module_id,
module_type__exact='sequential',
).values('student__username', 'student__profile__name').order_by('student__profile__name')
results = []
if not csv:
# Restrict screen list length
        # Adding 1 so we can tell if the list is larger than MAX_SCREEN_LIST_LENGTH
# without doing another select.
for student in students[0:MAX_SCREEN_LIST_LENGTH + 1]:
results.append({
'name': student['student__profile__name'],
'username': student['student__username'],
})
max_exceeded = False
if len(results) > MAX_SCREEN_LIST_LENGTH:
# Remove the last item so list length is exactly MAX_SCREEN_LIST_LENGTH
del results[-1]
max_exceeded = True
response_payload = {
'results': results,
'max_exceeded': max_exceeded,
}
return JsonResponse(response_payload)
else:
tooltip = request.GET.get('tooltip')
filename = sanitize_filename(tooltip[tooltip.index('S'):])
header = ['Name', 'Username']
for student in students:
results.append([student['student__profile__name'], student['student__username']])
response = create_csv_response(filename, header, results)
return response
def get_students_problem_grades(request, csv=False):
"""
Get a list of students and grades for a particular problem.
If 'csv' is False, returns a dict of student's name: username: grade: percent.
If 'csv' is True, returns a header array, and an array of arrays in the format:
student names, usernames, grades, percents for CSV download.
"""
module_id = request.GET.get('module_id')
csv = request.GET.get('csv')
# Query for "problem grades" students
students = models.StudentModule.objects.select_related('student').filter(
module_state_key__exact=module_id,
module_type__exact='problem',
grade__isnull=False,
).values('student__username', 'student__profile__name', 'grade', 'max_grade').order_by('student__profile__name')
results = []
if not csv:
# Restrict screen list length
        # Adding 1 so we can tell if the list is larger than MAX_SCREEN_LIST_LENGTH
# without doing another select.
for student in students[0:MAX_SCREEN_LIST_LENGTH + 1]:
student_dict = {
'name': student['student__profile__name'],
'username': student['student__username'],
'grade': student['grade'],
}
student_dict['percent'] = 0
if student['max_grade'] > 0:
student_dict['percent'] = round(student['grade'] * 100 / student['max_grade'])
results.append(student_dict)
max_exceeded = False
if len(results) > MAX_SCREEN_LIST_LENGTH:
# Remove the last item so list length is exactly MAX_SCREEN_LIST_LENGTH
del results[-1]
max_exceeded = True
response_payload = {
'results': results,
'max_exceeded': max_exceeded,
}
return JsonResponse(response_payload)
else:
tooltip = request.GET.get('tooltip')
filename = sanitize_filename(tooltip[:tooltip.rfind(' - ')])
header = ['Name', 'Username', 'Grade', 'Percent']
for student in students:
percent = 0
if student['max_grade'] > 0:
percent = round(student['grade'] * 100 / student['max_grade'])
results.append([student['student__profile__name'], student['student__username'], student['grade'], percent])
response = create_csv_response(filename, header, results)
return response
def sanitize_filename(filename):
"""
Utility function
"""
filename = filename.replace(" ", "_")
filename = filename.encode('ascii')
filename = filename[0:25] + '.csv'
return filename
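# Illustrative behaviour (added for clarity, not part of the upstream module):
# >>> sanitize_filename("Problem Grades Report")
# 'Problem_Grades_Report.csv'
# Spaces become underscores, the name is ASCII-encoded and truncated to 25
# characters before '.csv' is appended.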
| agpl-3.0 |
AnishShah/tensorflow | tensorflow/contrib/rnn/python/tools/checkpoint_convert_test.py | 46 | 4250 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unit tests for checkpoint converter."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import os
import tempfile
from tensorflow.contrib.rnn.python.tools import checkpoint_convert
from tensorflow.python.client import session
from tensorflow.python.framework import ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import saver as saver_lib
class CheckpointConvertTest(test.TestCase):
def setUp(self):
self._old_ckpt_path = tempfile.mktemp()
self._new_ckpt_path = tempfile.mktemp()
ops.reset_default_graph()
def tearDown(self):
for file_name in glob.glob(self._old_ckpt_path + "*"):
os.remove(file_name)
for file_name in glob.glob(self._new_ckpt_path + "*"):
os.remove(file_name)
def testReplacementDictsContainUniqueAndNonEmptyVariableNames(self):
for old_name in checkpoint_convert.RNN_NAME_REPLACEMENTS:
new_name = checkpoint_convert.RNN_NAME_REPLACEMENTS[old_name]
self.assertTrue(old_name)
self.assertTrue(new_name)
self.assertNotEqual(old_name, new_name)
for old_name in checkpoint_convert._RNN_SHARDED_NAME_REPLACEMENTS:
new_name = checkpoint_convert._RNN_SHARDED_NAME_REPLACEMENTS[old_name]
self.assertTrue(old_name)
self.assertTrue(new_name)
self.assertNotEqual(old_name, new_name)
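# Editor's note: RNN_NAME_REPLACEMENTS maps legacy RNN variable scope names to
# their renamed successors; the exact entries depend on the checkpoint_convert
# version, so no specific mapping is assumed here. The assertions above only
# require that each pair is non-empty and actually renames something.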
def testConversionFromV2WithConvertedVariableNamesSucceeds(self):
variables.Variable(10.0, name="a")
for old_name in checkpoint_convert.RNN_NAME_REPLACEMENTS:
variables.Variable(20.0, name=old_name)
with session.Session() as sess:
saver = saver_lib.Saver()
sess.run(variables.global_variables_initializer())
saver.save(sess, self._old_ckpt_path)
new_var_map, conversion_map = checkpoint_convert.convert_names(
self._old_ckpt_path, self._new_ckpt_path)
self.assertTrue(glob.glob(self._new_ckpt_path + "*"))
self.assertItemsEqual(
set(checkpoint_convert.RNN_NAME_REPLACEMENTS.values()).union(["a"]),
new_var_map.keys())
self.assertEqual(checkpoint_convert.RNN_NAME_REPLACEMENTS, conversion_map)
def testConversionFromV2WithoutConvertedVariableNamesSucceeds(self):
variables.Variable(10.0, name="a")
with session.Session() as sess:
saver = saver_lib.Saver()
sess.run(variables.global_variables_initializer())
saver.save(sess, self._old_ckpt_path)
new_var_map, conversion_map = checkpoint_convert.convert_names(
self._old_ckpt_path, self._new_ckpt_path)
self.assertItemsEqual(["a"], new_var_map.keys())
self.assertFalse(conversion_map)
def testConversionToV1Succeeds(self):
variables.Variable(10.0, name="a")
variables.Variable(
20.0, name=list(checkpoint_convert.RNN_NAME_REPLACEMENTS.keys())[-1])
with session.Session() as sess:
saver = saver_lib.Saver()
sess.run(variables.global_variables_initializer())
saver.save(sess, self._old_ckpt_path)
new_var_map, conversion_map = checkpoint_convert.convert_names(
self._old_ckpt_path, self._new_ckpt_path, write_v1_checkpoint=True)
self.assertItemsEqual(
["a", list(checkpoint_convert.RNN_NAME_REPLACEMENTS.values())[-1]],
new_var_map.keys())
self.assertEqual(
{list(checkpoint_convert.RNN_NAME_REPLACEMENTS.keys())[-1]:
list(checkpoint_convert.RNN_NAME_REPLACEMENTS.values())[-1]},
conversion_map)
if __name__ == "__main__":
test.main()
| apache-2.0 |
praveenmax/OctoPrint-redd | src/octoprint/server/__init__.py | 3 | 47109 | # coding=utf-8
from __future__ import absolute_import
__author__ = "Gina Häußge <[email protected]>"
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
__copyright__ = "Copyright (C) 2014 The OctoPrint Project - Released under terms of the AGPLv3 License"
import uuid
from sockjs.tornado import SockJSRouter
from flask import Flask, g, request, session, Blueprint, Request, Response
from flask.ext.login import LoginManager, current_user
from flask.ext.principal import Principal, Permission, RoleNeed, identity_loaded, UserNeed
from flask.ext.babel import Babel, gettext, ngettext
from flask.ext.assets import Environment, Bundle
from babel import Locale
from watchdog.observers import Observer
from watchdog.observers.polling import PollingObserver
from collections import defaultdict
import os
import logging
import logging.config
import atexit
import signal
import base64
SUCCESS = {}
NO_CONTENT = ("", 204)
NOT_MODIFIED = ("Not Modified", 304)
app = Flask("octoprint")
assets = None
babel = None
debug = False
printer = None
printerProfileManager = None
fileManager = None
slicingManager = None
analysisQueue = None
userManager = None
eventManager = None
loginManager = None
pluginManager = None
appSessionManager = None
pluginLifecycleManager = None
preemptiveCache = None
principals = Principal(app)
admin_permission = Permission(RoleNeed("admin"))
user_permission = Permission(RoleNeed("user"))
# only import the octoprint stuff down here, as it might depend on things defined above to be initialized already
from octoprint.printer import get_connection_options
from octoprint.printer.profile import PrinterProfileManager
from octoprint.printer.standard import Printer
from octoprint.settings import settings
import octoprint.users as users
import octoprint.events as events
import octoprint.plugin
import octoprint.timelapse
import octoprint._version
import octoprint.util
import octoprint.filemanager.storage
import octoprint.filemanager.analysis
import octoprint.slicing
from octoprint.server.util.flask import PreemptiveCache
from . import util
UI_API_KEY = ''.join('%02X' % ord(z) for z in uuid.uuid4().bytes)
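# Editor's note: the expression above renders the 16 random bytes of a
# version-4 UUID as a 32-character uppercase hex string, which serves as the
# UI's API key.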
versions = octoprint._version.get_versions()
VERSION = versions['version']
BRANCH = versions.get('branch', None)
DISPLAY_VERSION = "%s (%s branch)" % (VERSION, BRANCH) if BRANCH else VERSION
REVISION = versions.get('full-revision-id', versions.get('full', None))
del versions
LOCALES = []
LANGUAGES = set()
@identity_loaded.connect_via(app)
def on_identity_loaded(sender, identity):
user = load_user(identity.id)
if user is None:
return
identity.provides.add(UserNeed(user.get_id()))
if user.is_user():
identity.provides.add(RoleNeed("user"))
if user.is_admin():
identity.provides.add(RoleNeed("admin"))
def load_user(id):
if id == "_api":
return users.ApiUser()
if session and "usersession.id" in session:
sessionid = session["usersession.id"]
else:
sessionid = None
if userManager.enabled:
if sessionid:
return userManager.findUser(userid=id, session=sessionid)
else:
return userManager.findUser(userid=id)
return users.DummyUser()
#~~ startup code
class Server(object):
def __init__(self, configfile=None, basedir=None, host="0.0.0.0", port=5000, debug=False, allowRoot=False, logConf=None, octoprint_daemon=None):
self._configfile = configfile
self._basedir = basedir
self._host = host
self._port = port
self._debug = debug
self._allowRoot = allowRoot
self._logConf = logConf
self._server = None
self._octoprint_daemon = octoprint_daemon
self._logger = None
self._lifecycle_callbacks = defaultdict(list)
self._template_searchpaths = []
self._intermediary_server = None
def run(self):
if not self._allowRoot:
self._check_for_root()
global app
global babel
global printer
global printerProfileManager
global fileManager
global slicingManager
global analysisQueue
global userManager
global eventManager
global loginManager
global pluginManager
global appSessionManager
global pluginLifecycleManager
global preemptiveCache
global debug
from tornado.ioloop import IOLoop
from tornado.web import Application, RequestHandler
import sys
debug = self._debug
# first initialize the settings singleton and make sure it uses given configfile and basedir if available
s = settings(init=True, basedir=self._basedir, configfile=self._configfile)
# then monkey patch a bunch of stuff
util.tornado.fix_ioloop_scheduling()
util.flask.enable_additional_translations(additional_folders=[s.getBaseFolder("translations")])
# setup app
self._setup_app(app)
# setup i18n
self._setup_i18n(app)
# then initialize logging
self._setup_logging(self._debug, self._logConf)
self._logger = logging.getLogger(__name__)
def exception_logger(exc_type, exc_value, exc_tb):
self._logger.error("Uncaught exception", exc_info=(exc_type, exc_value, exc_tb))
sys.excepthook = exception_logger
self._logger.info("Starting OctoPrint %s" % DISPLAY_VERSION)
# start the intermediary server
self._start_intermediary_server(s)
# then initialize the plugin manager
pluginManager = octoprint.plugin.plugin_manager(init=True)
printerProfileManager = PrinterProfileManager()
eventManager = events.eventManager()
analysisQueue = octoprint.filemanager.analysis.AnalysisQueue()
slicingManager = octoprint.slicing.SlicingManager(s.getBaseFolder("slicingProfiles"), printerProfileManager)
storage_managers = dict()
storage_managers[octoprint.filemanager.FileDestinations.LOCAL] = octoprint.filemanager.storage.LocalFileStorage(s.getBaseFolder("uploads"))
fileManager = octoprint.filemanager.FileManager(analysisQueue, slicingManager, printerProfileManager, initial_storage_managers=storage_managers)
printer = Printer(fileManager, analysisQueue, printerProfileManager)
appSessionManager = util.flask.AppSessionManager()
pluginLifecycleManager = LifecycleManager(pluginManager)
preemptiveCache = PreemptiveCache(os.path.join(s.getBaseFolder("data"), "preemptive_cache_config.yaml"))
# ... and initialize all plugins
def octoprint_plugin_inject_factory(name, implementation):
"""Factory for injections for all OctoPrintPlugins"""
if not isinstance(implementation, octoprint.plugin.OctoPrintPlugin):
# we only care about OctoPrintPlugins
return None
return dict(
plugin_manager=pluginManager,
printer_profile_manager=printerProfileManager,
event_bus=eventManager,
analysis_queue=analysisQueue,
slicing_manager=slicingManager,
file_manager=fileManager,
printer=printer,
app_session_manager=appSessionManager,
plugin_lifecycle_manager=pluginLifecycleManager,
data_folder=os.path.join(settings().getBaseFolder("data"), name),
preemptive_cache=preemptiveCache
)
def settings_plugin_inject_factory(name, implementation):
"""Factory for additional injections depending on plugin type"""
if not isinstance(implementation, octoprint.plugin.SettingsPlugin):
# we only care about SettingsPlugins
return None
# SettingsPlugin instances get a PluginSettings instance injected
default_settings = implementation.get_settings_defaults()
get_preprocessors, set_preprocessors = implementation.get_settings_preprocessors()
plugin_settings = octoprint.plugin.plugin_settings(name,
defaults=default_settings,
get_preprocessors=get_preprocessors,
set_preprocessors=set_preprocessors)
return dict(settings=plugin_settings)
def settings_plugin_config_migration_and_cleanup(name, implementation):
"""Take care of migrating and cleaning up any old settings"""
if not isinstance(implementation, octoprint.plugin.SettingsPlugin):
return
settings_version = implementation.get_settings_version()
settings_migrator = implementation.on_settings_migrate
if settings_version is not None and settings_migrator is not None:
stored_version = implementation._settings.get_int([octoprint.plugin.SettingsPlugin.config_version_key])
if stored_version is None or stored_version < settings_version:
settings_migrator(settings_version, stored_version)
implementation._settings.set_int([octoprint.plugin.SettingsPlugin.config_version_key], settings_version)
implementation.on_settings_cleanup()
implementation._settings.save()
implementation.on_settings_initialized()
pluginManager.implementation_inject_factories=[octoprint_plugin_inject_factory, settings_plugin_inject_factory]
pluginManager.initialize_implementations()
settingsPlugins = pluginManager.get_implementations(octoprint.plugin.SettingsPlugin)
for implementation in settingsPlugins:
try:
settings_plugin_config_migration_and_cleanup(implementation._identifier, implementation)
except:
self._logger.exception("Error while trying to migrate settings for plugin {}, ignoring it".format(implementation._identifier))
pluginManager.implementation_post_inits=[settings_plugin_config_migration_and_cleanup]
pluginManager.log_all_plugins()
# initialize file manager and register it for changes in the registered plugins
fileManager.initialize()
pluginLifecycleManager.add_callback(["enabled", "disabled"], lambda name, plugin: fileManager.reload_plugins())
# initialize slicing manager and register it for changes in the registered plugins
slicingManager.initialize()
pluginLifecycleManager.add_callback(["enabled", "disabled"], lambda name, plugin: slicingManager.reload_slicers())
# setup jinja2
self._setup_jinja2()
# make sure plugin lifecycle events relevant for jinja2 are taken care of
def template_enabled(name, plugin):
if plugin.implementation is None or not isinstance(plugin.implementation, octoprint.plugin.TemplatePlugin):
return
self._register_additional_template_plugin(plugin.implementation)
def template_disabled(name, plugin):
if plugin.implementation is None or not isinstance(plugin.implementation, octoprint.plugin.TemplatePlugin):
return
self._unregister_additional_template_plugin(plugin.implementation)
pluginLifecycleManager.add_callback("enabled", template_enabled)
pluginLifecycleManager.add_callback("disabled", template_disabled)
# setup assets
self._setup_assets()
# configure timelapse
octoprint.timelapse.configure_timelapse()
# setup command triggers
events.CommandTrigger(printer)
if self._debug:
events.DebugEventListener()
# setup access control
userManagerName = s.get(["accessControl", "userManager"])
try:
clazz = octoprint.util.get_class(userManagerName)
userManager = clazz()
except AttributeError as e:
self._logger.exception("Could not instantiate user manager {}, falling back to FilebasedUserManager!".format(userManagerName))
userManager = octoprint.users.FilebasedUserManager()
finally:
userManager.enabled = s.getBoolean(["accessControl", "enabled"])
loginManager = LoginManager()
loginManager.session_protection = "strong"
loginManager.user_callback = load_user
if not userManager.enabled:
loginManager.anonymous_user = users.DummyUser
principals.identity_loaders.appendleft(users.dummy_identity_loader)
loginManager.init_app(app)
# register API blueprint
self._setup_blueprints()
## Tornado initialization starts here
if self._host is None:
self._host = s.get(["server", "host"])
if self._port is None:
self._port = s.getInt(["server", "port"])
ioloop = IOLoop()
ioloop.install()
self._router = SockJSRouter(self._create_socket_connection, "/sockjs")
upload_suffixes = dict(name=s.get(["server", "uploads", "nameSuffix"]), path=s.get(["server", "uploads", "pathSuffix"]))
def mime_type_guesser(path):
from octoprint.filemanager import get_mime_type
return get_mime_type(path)
download_handler_kwargs = dict(
as_attachment=True,
allow_client_caching=False
)
additional_mime_types=dict(mime_type_guesser=mime_type_guesser)
admin_validator = dict(access_validation=util.tornado.access_validation_factory(app, loginManager, util.flask.admin_validator))
no_hidden_files_validator = dict(path_validation=util.tornado.path_validation_factory(lambda path: not octoprint.util.is_hidden_path(path), status_code=404))
def joined_dict(*dicts):
if not len(dicts):
return dict()
joined = dict()
for d in dicts:
joined.update(d)
return joined
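# Illustrative behaviour (added for clarity): later dicts win on key conflicts.
# >>> joined_dict(dict(a=1), dict(b=2), dict(a=3))
# {'a': 3, 'b': 2}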
server_routes = self._router.urls + [
# various downloads
(r"/downloads/timelapse/([^/]*\.mp[g4])", util.tornado.LargeResponseHandler, joined_dict(dict(path=s.getBaseFolder("timelapse")), download_handler_kwargs, no_hidden_files_validator)),
(r"/downloads/files/local/(.*)", util.tornado.LargeResponseHandler, joined_dict(dict(path=s.getBaseFolder("uploads")), download_handler_kwargs, no_hidden_files_validator, additional_mime_types)),
(r"/downloads/logs/([^/]*)", util.tornado.LargeResponseHandler, joined_dict(dict(path=s.getBaseFolder("logs")), download_handler_kwargs, admin_validator)),
# camera snapshot
(r"/downloads/camera/current", util.tornado.UrlProxyHandler, dict(url=s.get(["webcam", "snapshot"]), as_attachment=True, access_validation=util.tornado.access_validation_factory(app, loginManager, util.flask.user_validator))),
# generated webassets
(r"/static/webassets/(.*)", util.tornado.LargeResponseHandler, dict(path=os.path.join(s.getBaseFolder("generated"), "webassets"))),
# online indicators - text file with "online" as content and a transparent gif
(r"/online.txt", util.tornado.StaticDataHandler, dict(data="online\n")),
(r"/online.gif", util.tornado.StaticDataHandler, dict(data=bytes(base64.b64decode("R0lGODlhAQABAIAAAAAAAP///yH5BAEAAAAALAAAAAABAAEAAAIBRAA7")), content_type="image/gif"))
]
# fetch additional routes from plugins
for name, hook in pluginManager.get_hooks("octoprint.server.http.routes").items():
try:
result = hook(list(server_routes))
except:
self._logger.exception("There was an error while retrieving additional server routes from plugin hook {name}".format(**locals()))
else:
if isinstance(result, (list, tuple)):
for entry in result:
if not isinstance(entry, tuple) or not len(entry) == 3:
continue
if not isinstance(entry[0], basestring):
continue
if not isinstance(entry[2], dict):
continue
route, handler, kwargs = entry
route = r"/plugin/{name}/{route}".format(name=name, route=route if not route.startswith("/") else route[1:])
self._logger.debug("Adding additional route {route} handled by handler {handler} and with additional arguments {kwargs!r}".format(**locals()))
server_routes.append((route, handler, kwargs))
server_routes.append((r".*", util.tornado.UploadStorageFallbackHandler, dict(fallback=util.tornado.WsgiInputContainer(app.wsgi_app), file_prefix="octoprint-file-upload-", file_suffix=".tmp", suffixes=upload_suffixes)))
self._tornado_app = Application(server_routes)
max_body_sizes = [
("POST", r"/api/files/([^/]*)", s.getInt(["server", "uploads", "maxSize"])),
("POST", r"/api/languages", 5 * 1024 * 1024)
]
# allow plugins to extend allowed maximum body sizes
for name, hook in pluginManager.get_hooks("octoprint.server.http.bodysize").items():
try:
result = hook(list(max_body_sizes))
except:
self._logger.exception("There was an error while retrieving additional upload sizes from plugin hook {name}".format(**locals()))
else:
if isinstance(result, (list, tuple)):
for entry in result:
if not isinstance(entry, tuple) or not len(entry) == 3:
continue
if not entry[0] in util.tornado.UploadStorageFallbackHandler.BODY_METHODS:
continue
if not isinstance(entry[2], int):
continue
method, route, size = entry
route = r"/plugin/{name}/{route}".format(name=name, route=route if not route.startswith("/") else route[1:])
self._logger.debug("Adding maximum body size of {size}B for {method} requests to {route})".format(**locals()))
max_body_sizes.append((method, route, size))
self._stop_intermediary_server()
# initialize and bind the server
self._server = util.tornado.CustomHTTPServer(self._tornado_app, max_body_sizes=max_body_sizes, default_max_body_size=s.getInt(["server", "maxSize"]))
self._server.listen(self._port, address=self._host)
eventManager.fire(events.Events.STARTUP)
# auto connect
if s.getBoolean(["serial", "autoconnect"]):
(port, baudrate) = s.get(["serial", "port"]), s.getInt(["serial", "baudrate"])
printer_profile = printerProfileManager.get_default()
connectionOptions = get_connection_options()
if port in connectionOptions["ports"]:
printer.connect(port=port, baudrate=baudrate, profile=printer_profile["id"] if "id" in printer_profile else "_default")
# start up watchdogs
if s.getBoolean(["feature", "pollWatched"]):
# use the less performant polling observer if explicitly configured
observer = PollingObserver()
else:
# use os default
observer = Observer()
observer.schedule(util.watchdog.GcodeWatchdogHandler(fileManager, printer), s.getBaseFolder("watched"))
observer.start()
# run our startup plugins
octoprint.plugin.call_plugin(octoprint.plugin.StartupPlugin,
"on_startup",
args=(self._host, self._port))
def call_on_startup(name, plugin):
implementation = plugin.get_implementation(octoprint.plugin.StartupPlugin)
if implementation is None:
return
implementation.on_startup(self._host, self._port)
pluginLifecycleManager.add_callback("enabled", call_on_startup)
# prepare our after startup function
def on_after_startup():
self._logger.info("Listening on http://%s:%d" % (self._host, self._port))
# now this is somewhat ugly, but the issue is the following: startup plugins might want to do things for
# which they need the server to be already alive (e.g. for being able to resolve urls, such as favicons
# or service xmls or the like). While they are working, though, the ioloop would block. Therefore we'll
# create a single use thread in which to perform our after-startup-tasks, start that and hand back
# control to the ioloop
def work():
octoprint.plugin.call_plugin(octoprint.plugin.StartupPlugin,
"on_after_startup")
def call_on_after_startup(name, plugin):
implementation = plugin.get_implementation(octoprint.plugin.StartupPlugin)
if implementation is None:
return
implementation.on_after_startup()
pluginLifecycleManager.add_callback("enabled", call_on_after_startup)
# when we are through with that we also run our preemptive cache
if settings().getBoolean(["devel", "cache", "preemptive"]):
self._execute_preemptive_flask_caching(preemptiveCache)
import threading
threading.Thread(target=work).start()
ioloop.add_callback(on_after_startup)
# prepare our shutdown function
def on_shutdown():
# will be called on clean system exit and shutdown the watchdog observer and call the on_shutdown methods
# on all registered ShutdownPlugins
self._logger.info("Shutting down...")
observer.stop()
observer.join()
octoprint.plugin.call_plugin(octoprint.plugin.ShutdownPlugin,
"on_shutdown")
if self._octoprint_daemon is not None:
self._logger.info("Cleaning up daemon pidfile")
self._octoprint_daemon.terminated()
self._logger.info("Goodbye!")
atexit.register(on_shutdown)
def sigterm_handler(*args, **kwargs):
# will stop tornado on SIGTERM, making the program exit cleanly
def shutdown_tornado():
ioloop.stop()
ioloop.add_callback_from_signal(shutdown_tornado)
signal.signal(signal.SIGTERM, sigterm_handler)
try:
# this is the main loop - as long as tornado is running, OctoPrint is running
ioloop.start()
except (KeyboardInterrupt, SystemExit):
pass
except:
self._logger.fatal("Now that is embarrassing... Something really really went wrong here. Please report this including the stacktrace below in OctoPrint's bugtracker. Thanks!")
self._logger.exception("Stacktrace follows:")
def _create_socket_connection(self, session):
global printer, fileManager, analysisQueue, userManager, eventManager
return util.sockjs.PrinterStateConnection(printer, fileManager, analysisQueue, userManager,
eventManager, pluginManager, session)
def _check_for_root(self):
if "geteuid" in dir(os) and os.geteuid() == 0:
exit("You should not run OctoPrint as root!")
def _get_locale(self):
global LANGUAGES
if "l10n" in request.values:
return Locale.negotiate([request.values["l10n"]], LANGUAGES)
if hasattr(g, "identity") and g.identity and userManager.enabled:
userid = g.identity.id
try:
user_language = userManager.getUserSetting(userid, ("interface", "language"))
if user_language is not None and not user_language == "_default":
return Locale.negotiate([user_language], LANGUAGES)
except octoprint.users.UnknownUser:
pass
default_language = settings().get(["appearance", "defaultLanguage"])
if default_language is not None and not default_language == "_default" and default_language in LANGUAGES:
return Locale.negotiate([default_language], LANGUAGES)
return Locale.parse(request.accept_languages.best_match(LANGUAGES))
def _setup_logging(self, debug, logConf=None):
defaultConfig = {
"version": 1,
"formatters": {
"simple": {
"format": "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
},
"serial": {
"format": "%(asctime)s - %(message)s"
}
},
"handlers": {
"console": {
"class": "logging.StreamHandler",
"level": "DEBUG",
"formatter": "simple",
"stream": "ext://sys.stdout"
},
"file": {
"class": "octoprint.logging.handlers.CleaningTimedRotatingFileHandler",
"level": "DEBUG",
"formatter": "simple",
"when": "D",
"backupCount": 6,
"filename": os.path.join(settings().getBaseFolder("logs"), "octoprint.log")
},
"serialFile": {
"class": "octoprint.logging.handlers.SerialLogHandler",
"level": "DEBUG",
"formatter": "serial",
"backupCount": 3,
"filename": os.path.join(settings().getBaseFolder("logs"), "serial.log")
}
},
"loggers": {
"SERIAL": {
"level": "CRITICAL",
"handlers": ["serialFile"],
"propagate": False
},
"tornado.application": {
"level": "INFO"
},
"tornado.general": {
"level": "INFO"
},
"octoprint.server.util.flask": {
"level": "WARN"
}
},
"root": {
"level": "INFO",
"handlers": ["console", "file"]
}
}
if debug:
defaultConfig["root"]["level"] = "DEBUG"
if logConf is None:
logConf = os.path.join(settings().getBaseFolder("base"), "logging.yaml")
configFromFile = {}
if os.path.exists(logConf) and os.path.isfile(logConf):
import yaml
with open(logConf, "r") as f:
configFromFile = yaml.safe_load(f)
config = octoprint.util.dict_merge(defaultConfig, configFromFile)
logging.config.dictConfig(config)
logging.captureWarnings(True)
import warnings
warnings.simplefilter("always")
if settings().getBoolean(["serial", "log"]):
# enable debug logging to serial.log
logging.getLogger("SERIAL").setLevel(logging.DEBUG)
def _setup_app(self, app):
from octoprint.server.util.flask import ReverseProxiedEnvironment, OctoPrintFlaskRequest, OctoPrintFlaskResponse
s = settings()
app.debug = self._debug
secret_key = s.get(["server", "secretKey"])
if not secret_key:
import string
from random import choice
chars = string.ascii_lowercase + string.ascii_uppercase + string.digits
secret_key = "".join(choice(chars) for _ in range(32))
s.set(["server", "secretKey"], secret_key)
s.save()
app.secret_key = secret_key
reverse_proxied = ReverseProxiedEnvironment(
header_prefix=s.get(["server", "reverseProxy", "prefixHeader"]),
header_scheme=s.get(["server", "reverseProxy", "schemeHeader"]),
header_host=s.get(["server", "reverseProxy", "hostHeader"]),
header_server=s.get(["server", "reverseProxy", "serverHeader"]),
header_port=s.get(["server", "reverseProxy", "portHeader"]),
prefix=s.get(["server", "reverseProxy", "prefixFallback"]),
scheme=s.get(["server", "reverseProxy", "schemeFallback"]),
host=s.get(["server", "reverseProxy", "hostFallback"]),
server=s.get(["server", "reverseProxy", "serverFallback"]),
port=s.get(["server", "reverseProxy", "portFallback"])
)
OctoPrintFlaskRequest.environment_wrapper = reverse_proxied
app.request_class = OctoPrintFlaskRequest
app.response_class = OctoPrintFlaskResponse
@app.before_request
def before_request():
g.locale = self._get_locale()
@app.after_request
def after_request(response):
# send no-cache headers with all POST responses
if request.method == "POST":
response.cache_control.no_cache = True
response.headers.add("X-Clacks-Overhead", "GNU Terry Pratchett")
return response
from octoprint.util.jinja import MarkdownFilter
MarkdownFilter(app)
def _setup_i18n(self, app):
global babel
global LOCALES
global LANGUAGES
babel = Babel(app)
def get_available_locale_identifiers(locales):
result = set()
# add available translations
for locale in locales:
result.add(locale.language)
if locale.territory:
# if a territory is specified, add that too
result.add("%s_%s" % (locale.language, locale.territory))
return result
LOCALES = babel.list_translations()
LANGUAGES = get_available_locale_identifiers(LOCALES)
@babel.localeselector
def get_locale():
return self._get_locale()
def _setup_jinja2(self):
import re
app.jinja_env.add_extension("jinja2.ext.do")
app.jinja_env.add_extension("octoprint.util.jinja.trycatch")
def regex_replace(s, find, replace):
return re.sub(find, replace, s)
html_header_regex = re.compile("<h(?P<number>[1-6])>(?P<content>.*?)</h(?P=number)>")
def offset_html_headers(s, offset):
def repl(match):
number = int(match.group("number"))
number += offset
if number > 6:
number = 6
elif number < 1:
number = 1
return "<h{number}>{content}</h{number}>".format(number=number, content=match.group("content"))
return html_header_regex.sub(repl, s)
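# e.g. (illustrative) offset_html_headers("<h3>Title</h3>", 2) returns
# "<h5>Title</h5>"; results are clamped to the h1..h6 range.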
markdown_header_regex = re.compile("^(?P<hashs>#+)\s+(?P<content>.*)$", flags=re.MULTILINE)
def offset_markdown_headers(s, offset):
def repl(match):
number = len(match.group("hashs"))
number += offset
if number > 6:
number = 6
elif number < 1:
number = 1
return "{hashs} {content}".format(hashs="#" * number, content=match.group("content"))
return markdown_header_regex.sub(repl, s)
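# e.g. (illustrative) offset_markdown_headers("## Section", 1) returns
# "### Section", again clamped to at most six hashes.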
html_link_regex = re.compile("<(?P<tag>a.*?)>(?P<content>.*?)</a>")
def externalize_links(text):
def repl(match):
tag = match.group("tag")
if not u"href" in tag:
return match.group(0)
if not u"target=" in tag and not u"rel=" in tag:
tag += u" target=\"_blank\" rel=\"noreferrer noopener\""
content = match.group("content")
return u"<{tag}>{content}</a>".format(tag=tag, content=content)
return html_link_regex.sub(repl, text)
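# e.g. (illustrative) externalize_links(u'<a href="http://example.com">x</a>')
# returns the same link with target="_blank" rel="noreferrer noopener" added,
# while anchors without an href are left untouched.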
app.jinja_env.filters["regex_replace"] = regex_replace
app.jinja_env.filters["offset_html_headers"] = offset_html_headers
app.jinja_env.filters["offset_markdown_headers"] = offset_markdown_headers
app.jinja_env.filters["externalize_links"] = externalize_links
# configure additional template folders for jinja2
import jinja2
import octoprint.util.jinja
filesystem_loader = octoprint.util.jinja.FilteredFileSystemLoader([],
path_filter=lambda x: not octoprint.util.is_hidden_path(x))
filesystem_loader.searchpath = self._template_searchpaths
loaders = [app.jinja_loader, filesystem_loader]
if octoprint.util.is_running_from_source():
from octoprint.util.jinja import SelectedFileSystemLoader
root = os.path.abspath(os.path.join(os.path.dirname(__file__), "../../.."))
allowed = ["AUTHORS.md", "CHANGELOG.md", "SUPPORTERS.md", "THIRDPARTYLICENSES.md"]
loaders.append(SelectedFileSystemLoader(root, allowed, prefix="_data/"))
jinja_loader = jinja2.ChoiceLoader(loaders)
app.jinja_loader = jinja_loader
self._register_template_plugins()
def _execute_preemptive_flask_caching(self, preemptive_cache):
from werkzeug.test import EnvironBuilder
import time
# we clean up entries from our preemptive cache settings that haven't been
# accessed for longer than server.preemptiveCache.until days
preemptive_cache_timeout = settings().getInt(["server", "preemptiveCache", "until"])
cutoff_timestamp = time.time() - preemptive_cache_timeout * 24 * 60 * 60
def filter_current_entries(entry):
"""Returns True for entries younger than the cutoff date"""
return "_timestamp" in entry and entry["_timestamp"] > cutoff_timestamp
def filter_http_entries(entry):
"""Returns True for entries targeting http or https."""
return "base_url" in entry \
and entry["base_url"] \
and (entry["base_url"].startswith("http://")
or entry["base_url"].startswith("https://"))
def filter_entries(entry):
"""Combined filter."""
filters = (filter_current_entries,
filter_http_entries)
return all([f(entry) for f in filters])
# filter out all old and non-http entries
cache_data = preemptive_cache.clean_all_data(lambda root, entries: filter(filter_entries, entries))
if not cache_data:
return
def execute_caching():
for route in sorted(cache_data.keys(), key=lambda x: (x.count("/"), x)):
entries = reversed(sorted(cache_data[route], key=lambda x: x.get("_count", 0)))
for kwargs in entries:
plugin = kwargs.get("plugin", None)
additional_request_data = kwargs.get("_additional_request_data", dict())
kwargs = dict((k, v) for k, v in kwargs.items() if not k.startswith("_") and not k == "plugin")
kwargs.update(additional_request_data)
try:
if plugin:
self._logger.info("Preemptively caching {} (plugin {}) for {!r}".format(route, plugin, kwargs))
else:
self._logger.info("Preemptively caching {} for {!r}".format(route, kwargs))
headers = kwargs.get("headers", dict())
headers["X-Preemptive-Record"] = "no"
kwargs["headers"] = headers
builder = EnvironBuilder(**kwargs)
app(builder.get_environ(), lambda *a, **kw: None)
except:
self._logger.exception("Error while trying to preemptively cache {} for {!r}".format(route, kwargs))
# asynchronous caching
import threading
cache_thread = threading.Thread(target=execute_caching, name="Preemptive Cache Worker")
cache_thread.daemon = True
cache_thread.start()
def _register_template_plugins(self):
template_plugins = pluginManager.get_implementations(octoprint.plugin.TemplatePlugin)
for plugin in template_plugins:
try:
self._register_additional_template_plugin(plugin)
except:
self._logger.exception("Error while trying to register templates of plugin {}, ignoring it".format(plugin._identifier))
def _register_additional_template_plugin(self, plugin):
folder = plugin.get_template_folder()
if folder is not None and not folder in self._template_searchpaths:
self._template_searchpaths.append(folder)
def _unregister_additional_template_plugin(self, plugin):
folder = plugin.get_template_folder()
if folder is not None and folder in self._template_searchpaths:
self._template_searchpaths.remove(folder)
def _setup_blueprints(self):
from octoprint.server.api import api
from octoprint.server.apps import apps, clear_registered_app
import octoprint.server.views
app.register_blueprint(api, url_prefix="/api")
app.register_blueprint(apps, url_prefix="/apps")
# also register any blueprints defined in BlueprintPlugins
self._register_blueprint_plugins()
# and register a blueprint for serving the static files of asset plugins which are not blueprint plugins themselves
self._register_asset_plugins()
global pluginLifecycleManager
def clear_apps(name, plugin):
clear_registered_app()
pluginLifecycleManager.add_callback("enabled", clear_apps)
pluginLifecycleManager.add_callback("disabled", clear_apps)
def _register_blueprint_plugins(self):
blueprint_plugins = octoprint.plugin.plugin_manager().get_implementations(octoprint.plugin.BlueprintPlugin)
for plugin in blueprint_plugins:
try:
self._register_blueprint_plugin(plugin)
except:
self._logger.exception("Error while registering blueprint of plugin {}, ignoring it".format(plugin._identifier))
continue
def _register_asset_plugins(self):
asset_plugins = octoprint.plugin.plugin_manager().get_implementations(octoprint.plugin.AssetPlugin)
for plugin in asset_plugins:
if isinstance(plugin, octoprint.plugin.BlueprintPlugin):
continue
try:
self._register_asset_plugin(plugin)
except:
self._logger.exception("Error while registering assets of plugin {}, ignoring it".format(plugin._identifier))
continue
def _register_blueprint_plugin(self, plugin):
name = plugin._identifier
blueprint = plugin.get_blueprint()
if blueprint is None:
return
if plugin.is_blueprint_protected():
from octoprint.server.util import apiKeyRequestHandler, corsResponseHandler
blueprint.before_request(apiKeyRequestHandler)
blueprint.after_request(corsResponseHandler)
url_prefix = "/plugin/{name}".format(name=name)
app.register_blueprint(blueprint, url_prefix=url_prefix)
if self._logger:
self._logger.debug("Registered API of plugin {name} under URL prefix {url_prefix}".format(name=name, url_prefix=url_prefix))
def _register_asset_plugin(self, plugin):
name = plugin._identifier
url_prefix = "/plugin/{name}".format(name=name)
blueprint = Blueprint("plugin." + name, name, static_folder=plugin.get_asset_folder())
app.register_blueprint(blueprint, url_prefix=url_prefix)
if self._logger:
self._logger.debug("Registered assets of plugin {name} under URL prefix {url_prefix}".format(name=name, url_prefix=url_prefix))
def _setup_assets(self):
global app
global assets
global pluginManager
util.flask.fix_webassets_cache()
util.flask.fix_webassets_filtertool()
base_folder = settings().getBaseFolder("generated")
# clean the folder
if settings().getBoolean(["devel", "webassets", "clean_on_startup"]):
import shutil
import errno
import sys
for entry in ("webassets", ".webassets-cache"):
path = os.path.join(base_folder, entry)
# delete path if it exists
if os.path.isdir(path):
try:
self._logger.debug("Deleting {path}...".format(**locals()))
shutil.rmtree(path)
except:
self._logger.exception("Error while trying to delete {path}, leaving it alone".format(**locals()))
continue
# re-create path
self._logger.debug("Creating {path}...".format(**locals()))
error_text = "Error while trying to re-create {path}, that might cause errors with the webassets cache".format(**locals())
try:
os.makedirs(path)
except OSError as e:
if e.errno == errno.EACCES:
# that might be caused by the user still having the folder open somewhere, let's try again after
# waiting a bit
import time
for n in range(3):
time.sleep(0.5)
self._logger.debug("Creating {path}: Retry #{retry} after {time}s".format(path=path, retry=n+1, time=(n + 1)*0.5))
try:
os.makedirs(path)
break
except:
if self._logger.isEnabledFor(logging.DEBUG):
self._logger.exception("Ignored error while creating directory {path}".format(**locals()))
pass
else:
# this will only get executed if we never did
# successfully execute makedirs above
self._logger.exception(error_text)
continue
else:
# not an access error, so something we don't understand
# went wrong -> log an error and stop
self._logger.exception(error_text)
continue
except:
# not an OSError, so something we don't understand
# went wrong -> log an error and stop
self._logger.exception(error_text)
continue
self._logger.info("Reset webasset folder {path}...".format(**locals()))
AdjustedEnvironment = type(Environment)(Environment.__name__, (Environment,), dict(
resolver_class=util.flask.PluginAssetResolver
))
class CustomDirectoryEnvironment(AdjustedEnvironment):
@property
def directory(self):
return base_folder
assets = CustomDirectoryEnvironment(app)
assets.debug = not settings().getBoolean(["devel", "webassets", "bundle"])
UpdaterType = type(util.flask.SettingsCheckUpdater)(util.flask.SettingsCheckUpdater.__name__, (util.flask.SettingsCheckUpdater,), dict(
updater=assets.updater
))
assets.updater = UpdaterType
enable_gcodeviewer = settings().getBoolean(["gcodeViewer", "enabled"])
preferred_stylesheet = settings().get(["devel", "stylesheet"])
dynamic_core_assets = util.flask.collect_core_assets(enable_gcodeviewer=enable_gcodeviewer)
dynamic_plugin_assets = util.flask.collect_plugin_assets(
enable_gcodeviewer=enable_gcodeviewer,
preferred_stylesheet=preferred_stylesheet
)
js_libs = [
"js/lib/jquery/jquery.min.js",
"js/lib/modernizr.custom.js",
"js/lib/lodash.min.js",
"js/lib/sprintf.min.js",
"js/lib/knockout-3.4.0.js",
"js/lib/knockout.mapping-latest.js",
"js/lib/babel.js",
"js/lib/avltree.js",
"js/lib/bootstrap/bootstrap.js",
"js/lib/bootstrap/bootstrap-modalmanager.js",
"js/lib/bootstrap/bootstrap-modal.js",
"js/lib/bootstrap/bootstrap-slider.js",
"js/lib/bootstrap/bootstrap-tabdrop.js",
"js/lib/jquery/jquery.ui.core.js",
"js/lib/jquery/jquery.ui.widget.js",
"js/lib/jquery/jquery.ui.mouse.js",
"js/lib/jquery/jquery.flot.js",
"js/lib/jquery/jquery.iframe-transport.js",
"js/lib/jquery/jquery.fileupload.js",
"js/lib/jquery/jquery.slimscroll.min.js",
"js/lib/jquery/jquery.qrcode.min.js",
"js/lib/moment-with-locales.min.js",
"js/lib/pusher.color.min.js",
"js/lib/detectmobilebrowser.js",
"js/lib/md5.min.js",
"js/lib/pnotify.min.js",
"js/lib/bootstrap-slider-knockout-binding.js",
"js/lib/loglevel.min.js",
"js/lib/sockjs-0.3.4.min.js"
]
js_core = dynamic_core_assets["js"] + \
dynamic_plugin_assets["bundled"]["js"] + \
["js/app/dataupdater.js",
"js/app/helpers.js",
"js/app/main.js"]
js_plugins = dynamic_plugin_assets["external"]["js"]
if len(js_plugins) == 0:
js_plugins = ["empty"]
js_app = js_plugins + js_core
css_libs = [
"css/bootstrap.min.css",
"css/bootstrap-modal.css",
"css/bootstrap-slider.css",
"css/bootstrap-tabdrop.css",
"css/font-awesome.min.css",
"css/jquery.fileupload-ui.css",
"css/pnotify.min.css"
]
css_core = list(dynamic_core_assets["css"]) + list(dynamic_plugin_assets["bundled"]["css"])
if len(css_core) == 0:
css_core = ["empty"]
css_plugins = list(dynamic_plugin_assets["external"]["css"])
if len(css_plugins) == 0:
css_plugins = ["empty"]
css_app = css_core + css_plugins
less_core = list(dynamic_core_assets["less"]) + list(dynamic_plugin_assets["bundled"]["less"])
if len(less_core) == 0:
less_core = ["empty"]
less_plugins = list(dynamic_plugin_assets["external"]["less"])
if len(less_plugins) == 0:
less_plugins = ["empty"]
less_app = less_core + less_plugins
from webassets.filter import register_filter, Filter
from webassets.filter.cssrewrite.base import PatternRewriter
import re
class LessImportRewrite(PatternRewriter):
name = "less_importrewrite"
patterns = {
"import_rewrite": re.compile("(@import(\s+\(.*\))?\s+)\"(.*)\";")
}
def import_rewrite(self, m):
import_with_options = m.group(1)
import_url = m.group(3)
if not import_url.startswith("http:") and not import_url.startswith("https:") and not import_url.startswith("/"):
import_url = "../less/" + import_url
return "{import_with_options}\"{import_url}\";".format(**locals())
class JsDelimiterBundler(Filter):
name = "js_delimiter_bundler"
options = {}
def input(self, _in, out, **kwargs):
out.write(_in.read())
out.write("\n;\n")
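# Editor's note: appending "\n;\n" after every bundled file terminates any
# trailing statement, so one source file missing its final semicolon cannot
# corrupt the files concatenated after it.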
register_filter(LessImportRewrite)
register_filter(JsDelimiterBundler)
# JS
js_libs_bundle = Bundle(*js_libs, output="webassets/packed_libs.js", filters="js_delimiter_bundler")
if settings().getBoolean(["devel", "webassets", "minify"]):
js_core_bundle = Bundle(*js_core, output="webassets/packed_core.js", filters="rjsmin, js_delimiter_bundler")
js_plugins_bundle = Bundle(*js_plugins, output="webassets/packed_plugins.js", filters="rjsmin, js_delimiter_bundler")
js_app_bundle = Bundle(*js_app, output="webassets/packed_app.js", filters="rjsmin, js_delimiter_bundler")
else:
js_core_bundle = Bundle(*js_core, output="webassets/packed_core.js", filters="js_delimiter_bundler")
js_plugins_bundle = Bundle(*js_plugins, output="webassets/packed_plugins.js", filters="js_delimiter_bundler")
js_app_bundle = Bundle(*js_app, output="webassets/packed_app.js", filters="js_delimiter_bundler")
# CSS
css_libs_bundle = Bundle(*css_libs, output="webassets/packed_libs.css")
css_core_bundle = Bundle(*css_core, output="webassets/packed_core.css", filters="cssrewrite")
css_plugins_bundle = Bundle(*css_plugins, output="webassets/packed_plugins.css", filters="cssrewrite")
css_app_bundle = Bundle(*css_app, output="webassets/packed_app.css", filters="cssrewrite")
# LESS
less_core_bundle = Bundle(*less_core, output="webassets/packed_core.less", filters="cssrewrite, less_importrewrite")
less_plugins_bundle = Bundle(*less_plugins, output="webassets/packed_plugins.less", filters="cssrewrite, less_importrewrite")
less_app_bundle = Bundle(*less_app, output="webassets/packed_app.less", filters="cssrewrite, less_importrewrite")
# asset registration
assets.register("js_libs", js_libs_bundle)
assets.register("js_core", js_core_bundle)
assets.register("js_plugins", js_plugins_bundle)
assets.register("js_app", js_app_bundle)
assets.register("css_libs", css_libs_bundle)
assets.register("css_core", css_core_bundle)
assets.register("css_plugins", css_plugins_bundle)
assets.register("css_app", css_app_bundle)
assets.register("less_core", less_core_bundle)
assets.register("less_plugins", less_plugins_bundle)
assets.register("less_app", less_app_bundle)
def _start_intermediary_server(self, s):
import BaseHTTPServer
import SimpleHTTPServer
import threading
host = self._host
port = self._port
if host is None:
host = s.get(["server", "host"])
if port is None:
port = s.getInt(["server", "port"])
self._logger.debug("Starting intermediary server on {}:{}".format(host, port))
class IntermediaryServerHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
def __init__(self, rules=None, *args, **kwargs):
if rules is None:
rules = []
self.rules = rules
SimpleHTTPServer.SimpleHTTPRequestHandler.__init__(self, *args, **kwargs)
def do_GET(self):
request_path = self.path
if "?" in request_path:
request_path = request_path[0:request_path.find("?")]
for rule in self.rules:
path, data, content_type = rule
if request_path == path:
self.send_response(200)
if content_type:
self.send_header("Content-Type", content_type)
self.end_headers()
self.wfile.write(data)
break
else:
self.send_response(404)
self.end_headers()
self.wfile.write("Not found")
base_path = os.path.realpath(os.path.join(os.path.dirname(__file__), "..", "static"))
rules = [
("/", ["intermediary.html",], "text/html"),
("/favicon.ico", ["img", "tentacle-20x20.png"], "image/png"),
("/intermediary.gif", bytes(base64.b64decode("R0lGODlhAQABAIAAAAAAAP///yH5BAEAAAAALAAAAAABAAEAAAIBRAA7")), "image/gif")
]
def contents(args):
path = os.path.join(base_path, *args)
if not os.path.isfile(path):
return ""
with open(path, "rb") as f:
data = f.read()
return data
def process(rule):
if len(rule) == 2:
path, data = rule
content_type = None
else:
path, data, content_type = rule
if isinstance(data, (list, tuple)):
data = contents(data)
return path, data, content_type
rules = map(process, filter(lambda rule: len(rule) == 2 or len(rule) == 3, rules))
self._intermediary_server = BaseHTTPServer.HTTPServer((host, port), lambda *args, **kwargs: IntermediaryServerHandler(rules, *args, **kwargs))
thread = threading.Thread(target=self._intermediary_server.serve_forever)
thread.daemon = True
thread.start()
self._logger.debug("Intermediary server started")
def _stop_intermediary_server(self):
if self._intermediary_server is None:
return
self._logger.debug("Shutting down intermediary server...")
self._intermediary_server.shutdown()
self._intermediary_server.server_close()
self._logger.debug("Intermediary server shut down")
class LifecycleManager(object):
def __init__(self, plugin_manager):
self._plugin_manager = plugin_manager
self._plugin_lifecycle_callbacks = defaultdict(list)
self._logger = logging.getLogger(__name__)
def on_plugin_event_factory(lifecycle_event):
def on_plugin_event(name, plugin):
self.on_plugin_event(lifecycle_event, name, plugin)
return on_plugin_event
self._plugin_manager.on_plugin_loaded = on_plugin_event_factory("loaded")
self._plugin_manager.on_plugin_unloaded = on_plugin_event_factory("unloaded")
self._plugin_manager.on_plugin_activated = on_plugin_event_factory("activated")
self._plugin_manager.on_plugin_deactivated = on_plugin_event_factory("deactivated")
self._plugin_manager.on_plugin_enabled = on_plugin_event_factory("enabled")
self._plugin_manager.on_plugin_disabled = on_plugin_event_factory("disabled")
def on_plugin_event(self, event, name, plugin):
for lifecycle_callback in self._plugin_lifecycle_callbacks[event]:
lifecycle_callback(name, plugin)
def add_callback(self, events, callback):
if isinstance(events, (str, unicode)):
events = [events]
for event in events:
self._plugin_lifecycle_callbacks[event].append(callback)
def remove_callback(self, callback, events=None):
if events is None:
for event in self._plugin_lifecycle_callbacks:
if callback in self._plugin_lifecycle_callbacks[event]:
self._plugin_lifecycle_callbacks[event].remove(callback)
else:
if isinstance(events, (str, unicode)):
events = [events]
for event in events:
if callback in self._plugin_lifecycle_callbacks[event]:
self._plugin_lifecycle_callbacks[event].remove(callback)
if __name__ == "__main__":
server = Server()
server.run()
| agpl-3.0 |
NateBrune/bitcoin-fio | qa/rpc-tests/rest.py | 10 | 12151 | #!/usr/bin/env python2
# Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test REST interface
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from struct import *
import binascii
import json
import StringIO
try:
import http.client as httplib
except ImportError:
import httplib
try:
import urllib.parse as urlparse
except ImportError:
import urlparse
def deser_uint256(f):
r = 0
for i in range(8):
t = unpack(b"<I", f.read(4))[0]
r += t << (i * 32)
return r
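# Editor's note: reads 8 little-endian uint32 words (32 bytes total) and
# recombines them into a single 256-bit integer, least significant word first.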
# allows simple HTTP GET calls with a request body
def http_get_call(host, port, path, requestdata = '', response_object = 0):
conn = httplib.HTTPConnection(host, port)
conn.request('GET', path, requestdata)
if response_object:
return conn.getresponse()
return conn.getresponse().read()
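# Illustrative usage (added for clarity; host and port are assumptions):
# body = http_get_call('127.0.0.1', 18332, '/rest/chaininfo.json')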
class RESTTest (BitcoinTestFramework):
FORMAT_SEPARATOR = "."
def setup_chain(self):
print("Initializing test directory "+self.options.tmpdir)
initialize_chain_clean(self.options.tmpdir, 3)
def setup_network(self, split=False):
self.nodes = start_nodes(3, self.options.tmpdir)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
self.is_network_split=False
self.sync_all()
def run_test(self):
url = urlparse.urlparse(self.nodes[0].url)
print "Mining blocks..."
self.nodes[0].generate(1)
self.sync_all()
self.nodes[2].generate(100)
self.sync_all()
assert_equal(self.nodes[0].getbalance(), 50)
txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
self.sync_all()
self.nodes[2].generate(1)
self.sync_all()
bb_hash = self.nodes[0].getbestblockhash()
assert_equal(self.nodes[1].getbalance(), Decimal("0.1")) #balance now should be 0.1 on node 1
# load the latest 0.1 tx over the REST API
json_string = http_get_call(url.hostname, url.port, '/rest/tx/'+txid+self.FORMAT_SEPARATOR+"json")
json_obj = json.loads(json_string)
vintx = json_obj['vin'][0]['txid'] # get the vin to later check for utxo (should be spent by then)
# get n of 0.1 outpoint
n = 0
for vout in json_obj['vout']:
if vout['value'] == 0.1:
n = vout['n']
#######################################
# GETUTXOS: query an unspent outpoint #
#######################################
json_request = '/checkmempool/'+txid+'-'+str(n)
json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
#check chainTip response
assert_equal(json_obj['chaintipHash'], bb_hash)
#make sure there is one utxo
assert_equal(len(json_obj['utxos']), 1)
assert_equal(json_obj['utxos'][0]['value'], 0.1)
#################################################
# GETUTXOS: now query an already spent outpoint #
#################################################
json_request = '/checkmempool/'+vintx+'-0'
json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
#check chainTip response
assert_equal(json_obj['chaintipHash'], bb_hash)
#make sure there is no utxo in the response because this outpoint has been spent
assert_equal(len(json_obj['utxos']), 0)
#check bitmap
assert_equal(json_obj['bitmap'], "0")
##################################################
# GETUTXOS: now check both with the same request #
##################################################
json_request = '/checkmempool/'+txid+'-'+str(n)+'/'+vintx+'-0'
json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
assert_equal(len(json_obj['utxos']), 1)
assert_equal(json_obj['bitmap'], "10")
#test binary response
bb_hash = self.nodes[0].getbestblockhash()
binaryRequest = b'\x01\x02'
binaryRequest += binascii.unhexlify(txid)
binaryRequest += pack("i", n)
binaryRequest += binascii.unhexlify(vintx)
binaryRequest += pack("i", 0)
bin_response = http_get_call(url.hostname, url.port, '/rest/getutxos'+self.FORMAT_SEPARATOR+'bin', binaryRequest)
output = StringIO.StringIO()
output.write(bin_response)
output.seek(0)
chainHeight = unpack("i", output.read(4))[0]
hashFromBinResponse = hex(deser_uint256(output))[2:].zfill(65).rstrip("L")
assert_equal(bb_hash, hashFromBinResponse) #check if getutxo's chaintip during calculation was fine
assert_equal(chainHeight, 102) #chain height must be 102
############################
# GETUTXOS: mempool checks #
############################
# do a tx and don't sync
txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
json_string = http_get_call(url.hostname, url.port, '/rest/tx/'+txid+self.FORMAT_SEPARATOR+"json")
json_obj = json.loads(json_string)
vintx = json_obj['vin'][0]['txid'] # get the vin to later check for utxo (should be spent by then)
# get n of 0.1 outpoint
n = 0
for vout in json_obj['vout']:
if vout['value'] == 0.1:
n = vout['n']
json_request = '/'+txid+'-'+str(n)
json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
assert_equal(len(json_obj['utxos']), 0) #there should be no outpoint because the tx is only in the mempool and we did not use /checkmempool
json_request = '/checkmempool/'+txid+'-'+str(n)
json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
assert_equal(len(json_obj['utxos']), 1) #there should be an outpoint because the tx in the mempool is visible with /checkmempool
#do some invalid requests
json_request = '{"checkmempool'
response = http_get_call(url.hostname, url.port, '/rest/getutxos'+self.FORMAT_SEPARATOR+'json', json_request, True)
assert_equal(response.status, 500) #must be a 500 because we sent an invalid json request
json_request = '{"checkmempool'
response = http_get_call(url.hostname, url.port, '/rest/getutxos'+self.FORMAT_SEPARATOR+'bin', json_request, True)
assert_equal(response.status, 500) #must be a 500 because we sent an invalid bin request
response = http_get_call(url.hostname, url.port, '/rest/getutxos/checkmempool'+self.FORMAT_SEPARATOR+'bin', '', True)
assert_equal(response.status, 500) #must be a 500 because we sent an invalid bin request
#test limits
json_request = '/checkmempool/'
for x in range(0, 20):
json_request += txid+'-'+str(n)+'/'
json_request = json_request.rstrip("/")
response = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json', '', True)
assert_equal(response.status, 500) #must be a 500 because we are exceeding the query limit
json_request = '/checkmempool/'
for x in range(0, 15):
json_request += txid+'-'+str(n)+'/'
json_request = json_request.rstrip("/")
response = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json', '', True)
assert_equal(response.status, 200) #must be a 200 because we are within the query limit
self.nodes[0].generate(1) #generate block to not affect upcoming tests
self.sync_all()
################
# /rest/block/ #
################
# check binary format
response = http_get_call(url.hostname, url.port, '/rest/block/'+bb_hash+self.FORMAT_SEPARATOR+"bin", "", True)
assert_equal(response.status, 200)
assert_greater_than(int(response.getheader('content-length')), 80)
response_str = response.read()
# compare with block header
response_header = http_get_call(url.hostname, url.port, '/rest/headers/1/'+bb_hash+self.FORMAT_SEPARATOR+"bin", "", True)
assert_equal(response_header.status, 200)
assert_equal(int(response_header.getheader('content-length')), 80)
response_header_str = response_header.read()
assert_equal(response_str[0:80], response_header_str)
# check block hex format
response_hex = http_get_call(url.hostname, url.port, '/rest/block/'+bb_hash+self.FORMAT_SEPARATOR+"hex", "", True)
assert_equal(response_hex.status, 200)
assert_greater_than(int(response_hex.getheader('content-length')), 160)
response_hex_str = response_hex.read()
assert_equal(response_str.encode("hex")[0:160], response_hex_str[0:160])
# compare with hex block header
response_header_hex = http_get_call(url.hostname, url.port, '/rest/headers/1/'+bb_hash+self.FORMAT_SEPARATOR+"hex", "", True)
assert_equal(response_header_hex.status, 200)
assert_greater_than(int(response_header_hex.getheader('content-length')), 160)
response_header_hex_str = response_header_hex.read()
assert_equal(response_hex_str[0:160], response_header_hex_str[0:160])
assert_equal(response_header_str.encode("hex")[0:160], response_header_hex_str[0:160])
# check json format
json_string = http_get_call(url.hostname, url.port, '/rest/block/'+bb_hash+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
assert_equal(json_obj['hash'], bb_hash)
# do tx test
        tx_hash = json_obj['tx'][0]['txid']
json_string = http_get_call(url.hostname, url.port, '/rest/tx/'+tx_hash+self.FORMAT_SEPARATOR+"json")
json_obj = json.loads(json_string)
assert_equal(json_obj['txid'], tx_hash)
# check hex format response
hex_string = http_get_call(url.hostname, url.port, '/rest/tx/'+tx_hash+self.FORMAT_SEPARATOR+"hex", "", True)
assert_equal(hex_string.status, 200)
assert_greater_than(int(response.getheader('content-length')), 10)
# check block tx details
# let's make 3 tx and mine them on node 1
txs = []
txs.append(self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11))
txs.append(self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11))
txs.append(self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11))
self.sync_all()
# now mine the transactions
newblockhash = self.nodes[1].generate(1)
self.sync_all()
#check if the 3 tx show up in the new block
json_string = http_get_call(url.hostname, url.port, '/rest/block/'+newblockhash[0]+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
for tx in json_obj['tx']:
            if 'coinbase' not in tx['vin'][0]:  # exclude coinbase
assert_equal(tx['txid'] in txs, True)
#check the same but without tx details
json_string = http_get_call(url.hostname, url.port, '/rest/block/notxdetails/'+newblockhash[0]+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
for tx in txs:
assert_equal(tx in json_obj['tx'], True)
        # test /rest/chaininfo.json: the reported bestblockhash must match the node tip
bb_hash = self.nodes[0].getbestblockhash()
json_string = http_get_call(url.hostname, url.port, '/rest/chaininfo.json')
json_obj = json.loads(json_string)
assert_equal(json_obj['bestblockhash'], bb_hash)
if __name__ == '__main__':
    RESTTest().main()
| mit |
dipanjanS/text-analytics-with-python | Old-First-Edition/Ch06_Text_Similarity_and_Clustering/utils.py | 1 | 1097 | # -*- coding: utf-8 -*-
"""
Created on Sun Sep 11 23:06:06 2016
@author: DIP
"""
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
def build_feature_matrix(documents, feature_type='frequency',
ngram_range=(1, 1), min_df=0.0, max_df=1.0):
feature_type = feature_type.lower().strip()
if feature_type == 'binary':
vectorizer = CountVectorizer(binary=True, min_df=min_df,
max_df=max_df, ngram_range=ngram_range)
elif feature_type == 'frequency':
vectorizer = CountVectorizer(binary=False, min_df=min_df,
max_df=max_df, ngram_range=ngram_range)
elif feature_type == 'tfidf':
vectorizer = TfidfVectorizer(min_df=min_df, max_df=max_df,
ngram_range=ngram_range)
else:
raise Exception("Wrong feature type entered. Possible values: 'binary', 'frequency', 'tfidf'")
feature_matrix = vectorizer.fit_transform(documents).astype(float)
    return vectorizer, feature_matrix
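# A minimal usage sketch (the document list is hypothetical, not part of the
# module):
#     docs = ["the cat sat", "the dog sat on the cat"]
#     vectorizer, matrix = build_feature_matrix(docs, feature_type='tfidf',
#                                               ngram_range=(1, 2))
#     matrix.shape  # -> (2, number of unigram and bigram features)
| apache-2.0 |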
tzewangdorje/SIPserv | Twisted-13.1.0/twisted/conch/test/test_insults.py | 15 | 17669 | # -*- test-case-name: twisted.conch.test.test_insults -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
from twisted.trial import unittest
from twisted.test.proto_helpers import StringTransport
from twisted.conch.insults.insults import ServerProtocol, ClientProtocol
from twisted.conch.insults.insults import CS_UK, CS_US, CS_DRAWING, CS_ALTERNATE, CS_ALTERNATE_SPECIAL
from twisted.conch.insults.insults import G0, G1
from twisted.conch.insults.insults import modes
def _getattr(mock, name):
return super(Mock, mock).__getattribute__(name)
def occurrences(mock):
return _getattr(mock, 'occurrences')
def methods(mock):
return _getattr(mock, 'methods')
def _append(mock, obj):
occurrences(mock).append(obj)
default = object()
class Mock(object):
callReturnValue = default
def __init__(self, methods=None, callReturnValue=default):
"""
@param methods: Mapping of names to return values
@param callReturnValue: object __call__ should return
"""
self.occurrences = []
if methods is None:
methods = {}
self.methods = methods
if callReturnValue is not default:
self.callReturnValue = callReturnValue
def __call__(self, *a, **kw):
returnValue = _getattr(self, 'callReturnValue')
if returnValue is default:
returnValue = Mock()
# _getattr(self, 'occurrences').append(('__call__', returnValue, a, kw))
_append(self, ('__call__', returnValue, a, kw))
return returnValue
def __getattribute__(self, name):
methods = _getattr(self, 'methods')
if name in methods:
attrValue = Mock(callReturnValue=methods[name])
else:
attrValue = Mock()
# _getattr(self, 'occurrences').append((name, attrValue))
_append(self, (name, attrValue))
return attrValue
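# Illustration of the recording behaviour (hypothetical snippet, not part of
# the original suite):
#     m = Mock()
#     m.foo(1, x=2)
# occurrences(m) now holds ('foo', <attribute mock>), and that attribute mock
# in turn records ('__call__', <return mock>, (1,), {'x': 2}).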
class MockMixin:
def assertCall(self, occurrence, methodName, expectedPositionalArgs=(),
expectedKeywordArgs={}):
attr, mock = occurrence
self.assertEqual(attr, methodName)
self.assertEqual(len(occurrences(mock)), 1)
[(call, result, args, kw)] = occurrences(mock)
self.assertEqual(call, "__call__")
self.assertEqual(args, expectedPositionalArgs)
self.assertEqual(kw, expectedKeywordArgs)
return result
_byteGroupingTestTemplate = """\
def testByte%(groupName)s(self):
transport = StringTransport()
proto = Mock()
parser = self.protocolFactory(lambda: proto)
parser.factory = self
parser.makeConnection(transport)
bytes = self.TEST_BYTES
while bytes:
chunk = bytes[:%(bytesPer)d]
bytes = bytes[%(bytesPer)d:]
parser.dataReceived(chunk)
self.verifyResults(transport, proto, parser)
"""
class ByteGroupingsMixin(MockMixin):
protocolFactory = None
for word, n in [('Pairs', 2), ('Triples', 3), ('Quads', 4), ('Quints', 5), ('Sexes', 6)]:
exec _byteGroupingTestTemplate % {'groupName': word, 'bytesPer': n}
del word, n
def verifyResults(self, transport, proto, parser):
result = self.assertCall(occurrences(proto).pop(0), "makeConnection", (parser,))
self.assertEqual(occurrences(result), [])
del _byteGroupingTestTemplate
class ServerArrowKeys(ByteGroupingsMixin, unittest.TestCase):
protocolFactory = ServerProtocol
# All the arrow keys once
TEST_BYTES = '\x1b[A\x1b[B\x1b[C\x1b[D'
def verifyResults(self, transport, proto, parser):
ByteGroupingsMixin.verifyResults(self, transport, proto, parser)
for arrow in (parser.UP_ARROW, parser.DOWN_ARROW,
parser.RIGHT_ARROW, parser.LEFT_ARROW):
result = self.assertCall(occurrences(proto).pop(0), "keystrokeReceived", (arrow, None))
self.assertEqual(occurrences(result), [])
self.failIf(occurrences(proto))
class PrintableCharacters(ByteGroupingsMixin, unittest.TestCase):
protocolFactory = ServerProtocol
# Some letters and digits, first on their own, then capitalized,
# then modified with alt
TEST_BYTES = 'abc123ABC!@#\x1ba\x1bb\x1bc\x1b1\x1b2\x1b3'
def verifyResults(self, transport, proto, parser):
ByteGroupingsMixin.verifyResults(self, transport, proto, parser)
for char in 'abc123ABC!@#':
result = self.assertCall(occurrences(proto).pop(0), "keystrokeReceived", (char, None))
self.assertEqual(occurrences(result), [])
for char in 'abc123':
result = self.assertCall(occurrences(proto).pop(0), "keystrokeReceived", (char, parser.ALT))
self.assertEqual(occurrences(result), [])
occs = occurrences(proto)
self.failIf(occs, "%r should have been []" % (occs,))
class ServerFunctionKeys(ByteGroupingsMixin, unittest.TestCase):
"""Test for parsing and dispatching function keys (F1 - F12)
"""
protocolFactory = ServerProtocol
byteList = []
for bytes in ('OP', 'OQ', 'OR', 'OS', # F1 - F4
'15~', '17~', '18~', '19~', # F5 - F8
'20~', '21~', '23~', '24~'): # F9 - F12
byteList.append('\x1b[' + bytes)
TEST_BYTES = ''.join(byteList)
del byteList, bytes
def verifyResults(self, transport, proto, parser):
ByteGroupingsMixin.verifyResults(self, transport, proto, parser)
for funcNum in range(1, 13):
funcArg = getattr(parser, 'F%d' % (funcNum,))
result = self.assertCall(occurrences(proto).pop(0), "keystrokeReceived", (funcArg, None))
self.assertEqual(occurrences(result), [])
self.failIf(occurrences(proto))
class ClientCursorMovement(ByteGroupingsMixin, unittest.TestCase):
protocolFactory = ClientProtocol
d2 = "\x1b[2B"
r4 = "\x1b[4C"
u1 = "\x1b[A"
l2 = "\x1b[2D"
# Move the cursor down two, right four, up one, left two, up one, left two
TEST_BYTES = d2 + r4 + u1 + l2 + u1 + l2
del d2, r4, u1, l2
def verifyResults(self, transport, proto, parser):
ByteGroupingsMixin.verifyResults(self, transport, proto, parser)
for (method, count) in [('Down', 2), ('Forward', 4), ('Up', 1),
('Backward', 2), ('Up', 1), ('Backward', 2)]:
result = self.assertCall(occurrences(proto).pop(0), "cursor" + method, (count,))
self.assertEqual(occurrences(result), [])
self.failIf(occurrences(proto))
class ClientControlSequences(unittest.TestCase, MockMixin):
def setUp(self):
self.transport = StringTransport()
self.proto = Mock()
self.parser = ClientProtocol(lambda: self.proto)
self.parser.factory = self
self.parser.makeConnection(self.transport)
result = self.assertCall(occurrences(self.proto).pop(0), "makeConnection", (self.parser,))
self.failIf(occurrences(result))
def testSimpleCardinals(self):
self.parser.dataReceived(
''.join([''.join(['\x1b[' + str(n) + ch for n in ('', 2, 20, 200)]) for ch in 'BACD']))
occs = occurrences(self.proto)
for meth in ("Down", "Up", "Forward", "Backward"):
for count in (1, 2, 20, 200):
result = self.assertCall(occs.pop(0), "cursor" + meth, (count,))
self.failIf(occurrences(result))
self.failIf(occs)
def testScrollRegion(self):
self.parser.dataReceived('\x1b[5;22r\x1b[r')
occs = occurrences(self.proto)
result = self.assertCall(occs.pop(0), "setScrollRegion", (5, 22))
self.failIf(occurrences(result))
result = self.assertCall(occs.pop(0), "setScrollRegion", (None, None))
self.failIf(occurrences(result))
self.failIf(occs)
def testHeightAndWidth(self):
self.parser.dataReceived("\x1b#3\x1b#4\x1b#5\x1b#6")
occs = occurrences(self.proto)
result = self.assertCall(occs.pop(0), "doubleHeightLine", (True,))
self.failIf(occurrences(result))
result = self.assertCall(occs.pop(0), "doubleHeightLine", (False,))
self.failIf(occurrences(result))
result = self.assertCall(occs.pop(0), "singleWidthLine")
self.failIf(occurrences(result))
result = self.assertCall(occs.pop(0), "doubleWidthLine")
self.failIf(occurrences(result))
self.failIf(occs)
def testCharacterSet(self):
self.parser.dataReceived(
''.join([''.join(['\x1b' + g + n for n in 'AB012']) for g in '()']))
occs = occurrences(self.proto)
for which in (G0, G1):
for charset in (CS_UK, CS_US, CS_DRAWING, CS_ALTERNATE, CS_ALTERNATE_SPECIAL):
result = self.assertCall(occs.pop(0), "selectCharacterSet", (charset, which))
self.failIf(occurrences(result))
self.failIf(occs)
def testShifting(self):
self.parser.dataReceived("\x15\x14")
occs = occurrences(self.proto)
result = self.assertCall(occs.pop(0), "shiftIn")
self.failIf(occurrences(result))
result = self.assertCall(occs.pop(0), "shiftOut")
self.failIf(occurrences(result))
self.failIf(occs)
def testSingleShifts(self):
self.parser.dataReceived("\x1bN\x1bO")
occs = occurrences(self.proto)
result = self.assertCall(occs.pop(0), "singleShift2")
self.failIf(occurrences(result))
result = self.assertCall(occs.pop(0), "singleShift3")
self.failIf(occurrences(result))
self.failIf(occs)
def testKeypadMode(self):
self.parser.dataReceived("\x1b=\x1b>")
occs = occurrences(self.proto)
result = self.assertCall(occs.pop(0), "applicationKeypadMode")
self.failIf(occurrences(result))
result = self.assertCall(occs.pop(0), "numericKeypadMode")
self.failIf(occurrences(result))
self.failIf(occs)
def testCursor(self):
self.parser.dataReceived("\x1b7\x1b8")
occs = occurrences(self.proto)
result = self.assertCall(occs.pop(0), "saveCursor")
self.failIf(occurrences(result))
result = self.assertCall(occs.pop(0), "restoreCursor")
self.failIf(occurrences(result))
self.failIf(occs)
def testReset(self):
self.parser.dataReceived("\x1bc")
occs = occurrences(self.proto)
result = self.assertCall(occs.pop(0), "reset")
self.failIf(occurrences(result))
self.failIf(occs)
def testIndex(self):
self.parser.dataReceived("\x1bD\x1bM\x1bE")
occs = occurrences(self.proto)
result = self.assertCall(occs.pop(0), "index")
self.failIf(occurrences(result))
result = self.assertCall(occs.pop(0), "reverseIndex")
self.failIf(occurrences(result))
result = self.assertCall(occs.pop(0), "nextLine")
self.failIf(occurrences(result))
self.failIf(occs)
def testModes(self):
self.parser.dataReceived(
"\x1b[" + ';'.join(map(str, [modes.KAM, modes.IRM, modes.LNM])) + "h")
self.parser.dataReceived(
"\x1b[" + ';'.join(map(str, [modes.KAM, modes.IRM, modes.LNM])) + "l")
occs = occurrences(self.proto)
result = self.assertCall(occs.pop(0), "setModes", ([modes.KAM, modes.IRM, modes.LNM],))
self.failIf(occurrences(result))
result = self.assertCall(occs.pop(0), "resetModes", ([modes.KAM, modes.IRM, modes.LNM],))
self.failIf(occurrences(result))
self.failIf(occs)
def testErasure(self):
self.parser.dataReceived(
"\x1b[K\x1b[1K\x1b[2K\x1b[J\x1b[1J\x1b[2J\x1b[3P")
occs = occurrences(self.proto)
for meth in ("eraseToLineEnd", "eraseToLineBeginning", "eraseLine",
"eraseToDisplayEnd", "eraseToDisplayBeginning",
"eraseDisplay"):
result = self.assertCall(occs.pop(0), meth)
self.failIf(occurrences(result))
result = self.assertCall(occs.pop(0), "deleteCharacter", (3,))
self.failIf(occurrences(result))
self.failIf(occs)
def testLineDeletion(self):
self.parser.dataReceived("\x1b[M\x1b[3M")
occs = occurrences(self.proto)
for arg in (1, 3):
result = self.assertCall(occs.pop(0), "deleteLine", (arg,))
self.failIf(occurrences(result))
self.failIf(occs)
def testLineInsertion(self):
self.parser.dataReceived("\x1b[L\x1b[3L")
occs = occurrences(self.proto)
for arg in (1, 3):
result = self.assertCall(occs.pop(0), "insertLine", (arg,))
self.failIf(occurrences(result))
self.failIf(occs)
def testCursorPosition(self):
methods(self.proto)['reportCursorPosition'] = (6, 7)
self.parser.dataReceived("\x1b[6n")
self.assertEqual(self.transport.value(), "\x1b[7;8R")
occs = occurrences(self.proto)
result = self.assertCall(occs.pop(0), "reportCursorPosition")
# This isn't really an interesting assert, since it only tests that
# our mock setup is working right, but I'll include it anyway.
self.assertEqual(result, (6, 7))
def test_applicationDataBytes(self):
"""
Contiguous non-control bytes are passed to a single call to the
C{write} method of the terminal to which the L{ClientProtocol} is
connected.
"""
occs = occurrences(self.proto)
self.parser.dataReceived('a')
self.assertCall(occs.pop(0), "write", ("a",))
self.parser.dataReceived('bc')
self.assertCall(occs.pop(0), "write", ("bc",))
def _applicationDataTest(self, data, calls):
occs = occurrences(self.proto)
self.parser.dataReceived(data)
while calls:
self.assertCall(occs.pop(0), *calls.pop(0))
self.assertFalse(occs, "No other calls should happen: %r" % (occs,))
def test_shiftInAfterApplicationData(self):
"""
Application data bytes followed by a shift-in command are passed to a
call to C{write} before the terminal's C{shiftIn} method is called.
"""
self._applicationDataTest(
'ab\x15', [
("write", ("ab",)),
("shiftIn",)])
def test_shiftOutAfterApplicationData(self):
"""
Application data bytes followed by a shift-out command are passed to a
call to C{write} before the terminal's C{shiftOut} method is called.
"""
self._applicationDataTest(
'ab\x14', [
("write", ("ab",)),
("shiftOut",)])
def test_cursorBackwardAfterApplicationData(self):
"""
Application data bytes followed by a cursor-backward command are passed
to a call to C{write} before the terminal's C{cursorBackward} method is
called.
"""
self._applicationDataTest(
'ab\x08', [
("write", ("ab",)),
("cursorBackward",)])
def test_escapeAfterApplicationData(self):
"""
Application data bytes followed by an escape character are passed to a
call to C{write} before the terminal's handler method for the escape is
called.
"""
# Test a short escape
self._applicationDataTest(
'ab\x1bD', [
("write", ("ab",)),
("index",)])
# And a long escape
self._applicationDataTest(
'ab\x1b[4h', [
("write", ("ab",)),
("setModes", ([4],))])
# There's some other cases too, but they're all handled by the same
# codepaths as above.
class ServerProtocolOutputTests(unittest.TestCase):
"""
Tests for the bytes L{ServerProtocol} writes to its transport when its
methods are called.
"""
def test_nextLine(self):
"""
L{ServerProtocol.nextLine} writes C{"\r\n"} to its transport.
"""
# Why doesn't it write ESC E? Because ESC E is poorly supported. For
# example, gnome-terminal (many different versions) fails to scroll if
# it receives ESC E and the cursor is already on the last row.
protocol = ServerProtocol()
transport = StringTransport()
protocol.makeConnection(transport)
protocol.nextLine()
self.assertEqual(transport.value(), "\r\n")
class Deprecations(unittest.TestCase):
"""
Tests to ensure deprecation of L{insults.colors} and L{insults.client}
"""
def ensureDeprecated(self, message):
"""
Ensures that the correct deprecation warning was issued.
"""
warnings = self.flushWarnings()
self.assertIdentical(warnings[0]['category'], DeprecationWarning)
self.assertEqual(warnings[0]['message'], message)
self.assertEqual(len(warnings), 1)
def test_colors(self):
"""
The L{insults.colors} module is deprecated
"""
from twisted.conch.insults import colors
self.ensureDeprecated("twisted.conch.insults.colors was deprecated "
"in Twisted 10.1.0: Please use "
"twisted.conch.insults.helper instead.")
def test_client(self):
"""
The L{insults.client} module is deprecated
"""
from twisted.conch.insults import client
self.ensureDeprecated("twisted.conch.insults.client was deprecated "
"in Twisted 10.1.0: Please use "
"twisted.conch.insults.insults instead.")
| gpl-3.0 |
DakRomo/2017Challenges | challenge_3/python/mindm/src/majority.py | 3 | 1522 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
from collections import defaultdict
def main():
if len(sys.argv) == 1: # Arguments provided by piping in shell
args = sys.stdin.read()
elif len(sys.argv) == 2: # Arguments provided as command line args
args = sys.argv[1]
else:
print("Error: too many arguments")
exit(1)
inlist = args_to_list(args)
test_digit(inlist)
# Map elements to a dictionary where the key is the element and increment
# the value (default value is 0 for each key initially)
sum_dict = defaultdict(int)
for elem in inlist:
sum_dict[elem] += 1
result = []
majority_threshold = len(inlist) / 2
for key, value in sum_dict.items():
if value > majority_threshold:
result.append(key)
if result:
print(result[0])
else:
print("No majority value found")
def args_to_list(arg_string):
""" Parses argument-string to a list
"""
# Strip whitespace -> strip brackets -> split to substrings ->
# -> strip whitespace
arg_list = [x.strip() for x in arg_string.strip().strip("[]").split(',')]
return arg_list
def test_digit(arr):
""" Exits if list contains non-numeric strings
"""
for element in arr:
if not element.isdigit():
print("Error: '{}' is not numeric.".format(element))
exit(1)
if __name__ == "__main__":
main()
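# Illustrative run (hypothetical shell session):
#     $ echo "[3, 3, 4, 2, 3, 3, 5]" | ./majority.py
#     3
# 3 occurs 4 times out of 7 elements, which exceeds the majority threshold
# of len(inlist) / 2 = 3.5.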
| mit |
sudheesh001/oh-mainline | vendor/packages/docutils/test/test_parsers/test_rst/test_targets.py | 16 | 13765 | #! /usr/bin/env python
# $Id: test_targets.py 7062 2011-06-30 22:14:29Z milde $
# Author: David Goodger <[email protected]>
# Copyright: This module has been placed in the public domain.
"""
Tests for states.py.
"""
from __init__ import DocutilsTestSupport
def suite():
s = DocutilsTestSupport.ParserTestSuite()
s.generateTests(totest)
return s
totest = {}
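# Each entry below maps a test-group name to a list of
# [reStructuredText input, expected pseudo-XML output] pairs; the
# ParserTestSuite above turns every pair into an individual test case.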
totest['targets'] = [
["""\
.. _target:
(Internal hyperlink target.)
""",
"""\
<document source="test data">
<target ids="target" names="target">
<paragraph>
(Internal hyperlink target.)
"""],
["""\
.. _optional space before colon :
""",
"""\
<document source="test data">
<target ids="optional-space-before-colon" names="optional\ space\ before\ colon">
"""],
["""\
External hyperlink targets:
.. _one-liner: http://structuredtext.sourceforge.net
.. _starts-on-this-line: http://
structuredtext.
sourceforge.net
.. _entirely-below:
http://structuredtext.
sourceforge.net
.. _not-indirect: uri\\_
""",
"""\
<document source="test data">
<paragraph>
External hyperlink targets:
<target ids="one-liner" names="one-liner" refuri="http://structuredtext.sourceforge.net">
<target ids="starts-on-this-line" names="starts-on-this-line" refuri="http://structuredtext.sourceforge.net">
<target ids="entirely-below" names="entirely-below" refuri="http://structuredtext.sourceforge.net">
<target ids="not-indirect" names="not-indirect" refuri="uri_">
"""],
["""\
Indirect hyperlink targets:
.. _target1: reference_
.. _target2: `phrase-link reference`_
""",
"""\
<document source="test data">
<paragraph>
Indirect hyperlink targets:
<target ids="target1" names="target1" refname="reference">
<target ids="target2" names="target2" refname="phrase-link reference">
"""],
["""\
.. _a long target name:
.. _`a target name: including a colon (quoted)`:
.. _a target name\: including a colon (escaped):
""",
"""\
<document source="test data">
<target ids="a-long-target-name" names="a\ long\ target\ name">
<target ids="a-target-name-including-a-colon-quoted" names="a\ target\ name:\ including\ a\ colon\ (quoted)">
<target ids="a-target-name-including-a-colon-escaped" names="a\ target\ name:\ including\ a\ colon\ (escaped)">
"""],
["""\
.. _`target: No matching backquote.
.. _`: No matching backquote either.
""",
"""\
<document source="test data">
<comment xml:space="preserve">
_`target: No matching backquote.
<system_message level="2" line="1" source="test data" type="WARNING">
<paragraph>
malformed hyperlink target.
<comment xml:space="preserve">
_`: No matching backquote either.
<system_message level="2" line="2" source="test data" type="WARNING">
<paragraph>
malformed hyperlink target.
"""],
["""\
.. _a very long target name,
split across lines:
.. _`and another,
with backquotes`:
""",
"""\
<document source="test data">
<target ids="a-very-long-target-name-split-across-lines" names="a\ very\ long\ target\ name,\ split\ across\ lines">
<target ids="and-another-with-backquotes" names="and\ another,\ with\ backquotes">
"""],
["""\
External hyperlink:
.. _target: http://www.python.org/
""",
"""\
<document source="test data">
<paragraph>
External hyperlink:
<target ids="target" names="target" refuri="http://www.python.org/">
"""],
["""\
.. _email: [email protected]
.. _multi-line email: jdoe
@example.com
""",
"""\
<document source="test data">
<target ids="email" names="email" refuri="mailto:[email protected]">
<target ids="multi-line-email" names="multi-line\ email" refuri="mailto:[email protected]">
"""],
["""\
Malformed target:
.. __malformed: no good
Target beginning with an underscore:
.. _`_target`: OK
""",
"""\
<document source="test data">
<paragraph>
Malformed target:
<comment xml:space="preserve">
__malformed: no good
<system_message level="2" line="3" source="test data" type="WARNING">
<paragraph>
malformed hyperlink target.
<paragraph>
Target beginning with an underscore:
<target ids="target" names="_target" refuri="OK">
"""],
["""\
Duplicate external targets (different URIs):
.. _target: first
.. _target: second
""",
"""\
<document source="test data">
<paragraph>
Duplicate external targets (different URIs):
<target dupnames="target" ids="target" refuri="first">
<system_message backrefs="id1" level="2" line="5" source="test data" type="WARNING">
<paragraph>
Duplicate explicit target name: "target".
<target dupnames="target" ids="id1" refuri="second">
"""],
["""\
Duplicate external targets (same URIs):
.. _target: first
.. _target: first
""",
"""\
<document source="test data">
<paragraph>
Duplicate external targets (same URIs):
<target ids="target" names="target" refuri="first">
<system_message backrefs="id1" level="1" line="5" source="test data" type="INFO">
<paragraph>
Duplicate explicit target name: "target".
<target dupnames="target" ids="id1" refuri="first">
"""],
["""\
Duplicate implicit targets.
Title
=====
Paragraph.
Title
=====
Paragraph.
""",
"""\
<document source="test data">
<paragraph>
Duplicate implicit targets.
<section dupnames="title" ids="title">
<title>
Title
<paragraph>
Paragraph.
<section dupnames="title" ids="id1">
<title>
Title
<system_message backrefs="id1" level="1" line="9" source="test data" type="INFO">
<paragraph>
Duplicate implicit target name: "title".
<paragraph>
Paragraph.
"""],
["""\
Duplicate implicit/explicit targets.
Title
=====
.. _title:
Paragraph.
""",
"""\
<document source="test data">
<paragraph>
Duplicate implicit/explicit targets.
<section dupnames="title" ids="title">
<title>
Title
<system_message backrefs="id1" level="1" line="6" source="test data" type="INFO">
<paragraph>
Duplicate implicit target name: "title".
<target ids="id1" names="title">
<paragraph>
Paragraph.
"""],
["""\
Duplicate implicit/directive targets.
Title
=====
.. target-notes::
:name: title
""",
"""\
<document source="test data">
<paragraph>
Duplicate implicit/directive targets.
<section dupnames="title" ids="title">
<title>
Title
<pending ids="id1" names="title">
<system_message backrefs="id1" level="1" line="4" source="test data" type="INFO">
<paragraph>
Duplicate implicit target name: "title".
.. internal attributes:
.transform: docutils.transforms.references.TargetNotes
.details:
"""],
["""\
Duplicate explicit targets.
.. _title:
First.
.. _title:
Second.
.. _title:
Third.
""",
"""\
<document source="test data">
<paragraph>
Duplicate explicit targets.
<target dupnames="title" ids="title">
<paragraph>
First.
<system_message backrefs="id1" level="2" line="7" source="test data" type="WARNING">
<paragraph>
Duplicate explicit target name: "title".
<target dupnames="title" ids="id1">
<paragraph>
Second.
<system_message backrefs="id2" level="2" line="11" source="test data" type="WARNING">
<paragraph>
Duplicate explicit target name: "title".
<target dupnames="title" ids="id2">
<paragraph>
Third.
"""],
["""\
Duplicate explicit/directive targets.
.. _title:
First.
.. rubric:: this is a title too
:name: title
""",
"""\
<document source="test data">
<paragraph>
Duplicate explicit/directive targets.
<target dupnames="title" ids="title">
<paragraph>
First.
<rubric dupnames="title" ids="id1">
this is a title too
<system_message backrefs="id1" level="2" line="9" source="test data" type="WARNING">
<paragraph>
Duplicate explicit target name: "title".
"""],
["""\
Duplicate targets:
Target
======
Implicit section header target.
.. [TARGET] Citation target.
.. [#target] Autonumber-labeled footnote target.
.. _target:
Explicit internal target.
.. _target: Explicit_external_target
.. rubric:: directive with target
:name: Target
""",
"""\
<document source="test data">
<paragraph>
Duplicate targets:
<section dupnames="target" ids="target">
<title>
Target
<paragraph>
Implicit section header target.
<citation dupnames="target" ids="id1">
<label>
TARGET
<system_message backrefs="id1" level="1" line="8" source="test data" type="INFO">
<paragraph>
Duplicate implicit target name: "target".
<paragraph>
Citation target.
<footnote auto="1" dupnames="target" ids="id2">
<system_message backrefs="id2" level="2" line="10" source="test data" type="WARNING">
<paragraph>
Duplicate explicit target name: "target".
<paragraph>
Autonumber-labeled footnote target.
<system_message backrefs="id3" level="2" line="12" source="test data" type="WARNING">
<paragraph>
Duplicate explicit target name: "target".
<target dupnames="target" ids="id3">
<paragraph>
Explicit internal target.
<system_message backrefs="id4" level="2" line="16" source="test data" type="WARNING">
<paragraph>
Duplicate explicit target name: "target".
<target dupnames="target" ids="id4" refuri="Explicit_external_target">
<rubric dupnames="target" ids="id5">
directive with target
<system_message backrefs="id5" level="2" line="4" source="test data" type="WARNING">
<paragraph>
Duplicate explicit target name: "target".
"""],
["""\
.. _unescaped colon at end:: no good
.. _:: no good either
.. _escaped colon\:: OK
.. _`unescaped colon, quoted:`: OK
""",
"""\
<document source="test data">
<comment xml:space="preserve">
_unescaped colon at end:: no good
<system_message level="2" line="1" source="test data" type="WARNING">
<paragraph>
malformed hyperlink target.
<comment xml:space="preserve">
_:: no good either
<system_message level="2" line="3" source="test data" type="WARNING">
<paragraph>
malformed hyperlink target.
<target ids="escaped-colon" names="escaped\ colon:" refuri="OK">
<target ids="unescaped-colon-quoted" names="unescaped\ colon,\ quoted:" refuri="OK">
"""],
]
totest['anonymous_targets'] = [
["""\
Anonymous external hyperlink target:
.. __: http://w3c.org/
""",
"""\
<document source="test data">
<paragraph>
Anonymous external hyperlink target:
<target anonymous="1" ids="id1" refuri="http://w3c.org/">
"""],
["""\
Anonymous external hyperlink target:
__ http://w3c.org/
""",
"""\
<document source="test data">
<paragraph>
Anonymous external hyperlink target:
<target anonymous="1" ids="id1" refuri="http://w3c.org/">
"""],
["""\
Anonymous indirect hyperlink target:
.. __: reference_
""",
"""\
<document source="test data">
<paragraph>
Anonymous indirect hyperlink target:
<target anonymous="1" ids="id1" refname="reference">
"""],
["""\
Anonymous external hyperlink target, not indirect:
__ uri\\_
__ this URI ends with an underscore_
""",
"""\
<document source="test data">
<paragraph>
Anonymous external hyperlink target, not indirect:
<target anonymous="1" ids="id1" refuri="uri_">
<target anonymous="1" ids="id2" refuri="thisURIendswithanunderscore_">
"""],
["""\
Anonymous indirect hyperlink targets:
__ reference_
__ `a very long
reference`_
""",
"""\
<document source="test data">
<paragraph>
Anonymous indirect hyperlink targets:
<target anonymous="1" ids="id1" refname="reference">
<target anonymous="1" ids="id2" refname="a very long reference">
"""],
["""\
Mixed anonymous & named indirect hyperlink targets:
__ reference_
.. __: reference_
__ reference_
.. _target1: reference_
no blank line
.. _target2: reference_
__ reference_
.. __: reference_
__ reference_
no blank line
""",
"""\
<document source="test data">
<paragraph>
Mixed anonymous & named indirect hyperlink targets:
<target anonymous="1" ids="id1" refname="reference">
<target anonymous="1" ids="id2" refname="reference">
<target anonymous="1" ids="id3" refname="reference">
<target ids="target1" names="target1" refname="reference">
<system_message level="2" line="7" source="test data" type="WARNING">
<paragraph>
Explicit markup ends without a blank line; unexpected unindent.
<paragraph>
no blank line
<target ids="target2" names="target2" refname="reference">
<target anonymous="1" ids="id4" refname="reference">
<target anonymous="1" ids="id5" refname="reference">
<target anonymous="1" ids="id6" refname="reference">
<system_message level="2" line="13" source="test data" type="WARNING">
<paragraph>
Explicit markup ends without a blank line; unexpected unindent.
<paragraph>
no blank line
"""],
["""\
.. _
""",
"""\
<document source="test data">
<comment xml:space="preserve">
_
"""],
]
if __name__ == '__main__':
import unittest
unittest.main(defaultTest='suite')
| agpl-3.0 |
fuhongliang/erpnext | erpnext/patches/v6_0/set_default_title.py | 19 | 1193 | import frappe
def execute():
frappe.reload_doctype("Quotation")
frappe.db.sql("""update tabQuotation set title = customer_name""")
frappe.reload_doctype("Sales Order")
frappe.db.sql("""update `tabSales Order` set title = customer_name""")
frappe.reload_doctype("Delivery Note")
frappe.db.sql("""update `tabDelivery Note` set title = customer_name""")
frappe.reload_doctype("Material Request")
frappe.db.sql("""update `tabMaterial Request` set title = material_request_type""")
frappe.reload_doctype("Supplier Quotation")
frappe.db.sql("""update `tabSupplier Quotation` set title = supplier_name""")
frappe.reload_doctype("Purchase Order")
frappe.db.sql("""update `tabPurchase Order` set title = supplier_name""")
frappe.reload_doctype("Purchase Receipt")
frappe.db.sql("""update `tabPurchase Receipt` set title = supplier_name""")
frappe.reload_doctype("Purchase Invoice")
frappe.db.sql("""update `tabPurchase Invoice` set title = supplier_name""")
frappe.reload_doctype("Stock Entry")
frappe.db.sql("""update `tabStock Entry` set title = purpose""")
frappe.reload_doctype("Sales Invoice")
frappe.db.sql("""update `tabSales Invoice` set title = customer_name""")
| agpl-3.0 |
Dhivyap/ansible | lib/ansible/modules/network/aci/aci_epg_to_contract.py | 27 | 9836 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: aci_epg_to_contract
short_description: Bind EPGs to Contracts (fv:RsCons, fv:RsProv)
description:
- Bind EPGs to Contracts on Cisco ACI fabrics.
notes:
- The C(tenant), C(app_profile), C(EPG), and C(Contract) used must exist before using this module in your playbook.
The M(aci_tenant), M(aci_ap), M(aci_epg), and M(aci_contract) modules can be used for this.
version_added: '2.4'
options:
ap:
description:
- Name of an existing application network profile, that will contain the EPGs.
type: str
aliases: [ app_profile, app_profile_name ]
contract:
description:
- The name of the contract.
type: str
aliases: [ contract_name ]
contract_type:
description:
- Determines if the EPG should Provide or Consume the Contract.
type: str
required: yes
choices: [ consumer, provider ]
epg:
description:
- The name of the end point group.
type: str
aliases: [ epg_name ]
priority:
description:
- QoS class.
- The APIC defaults to C(unspecified) when unset during creation.
type: str
choices: [ level1, level2, level3, unspecified ]
provider_match:
description:
- The matching algorithm for Provided Contracts.
- The APIC defaults to C(at_least_one) when unset during creation.
type: str
choices: [ all, at_least_one, at_most_one, none ]
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
type: str
choices: [ absent, present, query ]
default: present
tenant:
description:
- Name of an existing tenant.
type: str
aliases: [ tenant_name ]
extends_documentation_fragment: aci
seealso:
- module: aci_ap
- module: aci_epg
- module: aci_contract
- name: APIC Management Information Model reference
description: More information about the internal APIC classes B(fv:RsCons) and B(fv:RsProv).
link: https://developer.cisco.com/docs/apic-mim-ref/
author:
- Jacob McGill (@jmcgill298)
'''
EXAMPLES = r'''
- name: Add a new contract to EPG binding
aci_epg_to_contract:
host: apic
username: admin
password: SomeSecretPassword
tenant: anstest
ap: anstest
epg: anstest
contract: anstest_http
contract_type: provider
state: present
delegate_to: localhost
- name: Remove an existing contract to EPG binding
aci_epg_to_contract:
host: apic
username: admin
password: SomeSecretPassword
tenant: anstest
ap: anstest
epg: anstest
contract: anstest_http
contract_type: provider
state: absent
delegate_to: localhost
- name: Query a specific contract to EPG binding
aci_epg_to_contract:
host: apic
username: admin
password: SomeSecretPassword
tenant: anstest
ap: anstest
epg: anstest
contract: anstest_http
contract_type: provider
state: query
delegate_to: localhost
register: query_result
- name: Query all provider contract to EPG bindings
aci_epg_to_contract:
host: apic
username: admin
password: SomeSecretPassword
contract_type: provider
state: query
delegate_to: localhost
register: query_result
'''
RETURN = r'''
current:
description: The existing configuration from the APIC after the module has finished
returned: success
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
error:
description: The error information as returned from the APIC
returned: failure
type: dict
sample:
{
"code": "122",
"text": "unknown managed object class foo"
}
raw:
description: The raw output returned by the APIC REST API (xml or json)
returned: parse error
type: str
sample: '<?xml version="1.0" encoding="UTF-8"?><imdata totalCount="1"><error code="122" text="unknown managed object class foo"/></imdata>'
sent:
description: The actual/minimal configuration pushed to the APIC
returned: info
type: list
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment"
}
}
}
previous:
description: The original configuration from the APIC before the module has started
returned: info
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
proposed:
description: The assembled configuration from the user-provided parameters
returned: info
type: dict
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"name": "production"
}
}
}
filter_string:
description: The filter string used for the request
returned: failure or debug
type: str
sample: ?rsp-prop-include=config-only
method:
description: The HTTP method used for the request to the APIC
returned: failure or debug
type: str
sample: POST
response:
description: The HTTP response from the APIC
returned: failure or debug
type: str
sample: OK (30 bytes)
status:
description: The HTTP status from the APIC
returned: failure or debug
type: int
sample: 200
url:
description: The HTTP url used for the request to the APIC
returned: failure or debug
type: str
sample: https://10.11.12.13/api/mo/uni/tn-production.json
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.aci.aci import ACIModule, aci_argument_spec
ACI_CLASS_MAPPING = dict(
consumer={
'class': 'fvRsCons',
'rn': 'rscons-',
},
provider={
'class': 'fvRsProv',
'rn': 'rsprov-',
},
)
PROVIDER_MATCH_MAPPING = dict(
all='All',
at_least_one='AtleastOne',
    at_most_one='AtmostOne',
none='None',
)
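# For illustration: contract_type='provider' selects APIC class 'fvRsProv'
# with RN prefix 'rsprov-', so a contract named 'web' produces the RN
# 'rsprov-web'; contract_type='consumer' analogously produces 'rscons-web'.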
def main():
argument_spec = aci_argument_spec()
argument_spec.update(
contract_type=dict(type='str', required=True, choices=['consumer', 'provider']),
ap=dict(type='str', aliases=['app_profile', 'app_profile_name']), # Not required for querying all objects
epg=dict(type='str', aliases=['epg_name']), # Not required for querying all objects
contract=dict(type='str', aliases=['contract_name']), # Not required for querying all objects
priority=dict(type='str', choices=['level1', 'level2', 'level3', 'unspecified']),
provider_match=dict(type='str', choices=['all', 'at_least_one', 'at_most_one', 'none']),
state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
tenant=dict(type='str', aliases=['tenant_name']), # Not required for querying all objects
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
required_if=[
['state', 'absent', ['ap', 'contract', 'epg', 'tenant']],
['state', 'present', ['ap', 'contract', 'epg', 'tenant']],
],
)
ap = module.params['ap']
contract = module.params['contract']
contract_type = module.params['contract_type']
epg = module.params['epg']
priority = module.params['priority']
provider_match = module.params['provider_match']
if provider_match is not None:
provider_match = PROVIDER_MATCH_MAPPING[provider_match]
state = module.params['state']
tenant = module.params['tenant']
aci_class = ACI_CLASS_MAPPING[contract_type]["class"]
aci_rn = ACI_CLASS_MAPPING[contract_type]["rn"]
if contract_type == "consumer" and provider_match is not None:
module.fail_json(msg="the 'provider_match' is only configurable for Provided Contracts")
aci = ACIModule(module)
aci.construct_url(
root_class=dict(
aci_class='fvTenant',
aci_rn='tn-{0}'.format(tenant),
module_object=tenant,
target_filter={'name': tenant},
),
subclass_1=dict(
aci_class='fvAp',
aci_rn='ap-{0}'.format(ap),
module_object=ap,
target_filter={'name': ap},
),
subclass_2=dict(
aci_class='fvAEPg',
aci_rn='epg-{0}'.format(epg),
module_object=epg,
target_filter={'name': epg},
),
subclass_3=dict(
aci_class=aci_class,
aci_rn='{0}{1}'.format(aci_rn, contract),
module_object=contract,
target_filter={'tnVzBrCPName': contract},
),
)
aci.get_existing()
if state == 'present':
aci.payload(
aci_class=aci_class,
class_config=dict(
matchT=provider_match,
prio=priority,
tnVzBrCPName=contract,
),
)
aci.get_diff(aci_class=aci_class)
aci.post_config()
elif state == 'absent':
aci.delete_config()
aci.exit_json()
if __name__ == "__main__":
main()
| gpl-3.0 |
billy-inn/scikit-learn | examples/decomposition/plot_ica_vs_pca.py | 306 | 3329 | """
==========================
FastICA on 2D point clouds
==========================
This example visually compares, in the feature space, the results of two
different component analysis techniques: :ref:`ICA` vs :ref:`PCA`.
Representing ICA in the feature space gives the view of 'geometric ICA':
ICA is an algorithm that finds directions in the feature space
corresponding to projections with high non-Gaussianity. These directions
need not be orthogonal in the original feature space, but they are
orthogonal in the whitened feature space, in which all directions
correspond to the same variance.
PCA, on the other hand, finds orthogonal directions in the raw feature
space that correspond to directions accounting for maximum variance.
Here we simulate independent sources using a highly non-Gaussian
process: two Student's t distributions with a low number of degrees of
freedom (top left figure). We mix them to create observations (top right
figure).
In this raw observation space, directions identified by PCA are
represented by orange vectors. We represent the signal in the PCA space,
after whitening by the variance corresponding to the PCA vectors (lower
left). Running ICA corresponds to finding a rotation in this space to
identify the directions of largest non-Gaussianity (lower right).
"""
print(__doc__)
# Authors: Alexandre Gramfort, Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA, FastICA
###############################################################################
# Generate sample data
rng = np.random.RandomState(42)
S = rng.standard_t(1.5, size=(20000, 2))
S[:, 0] *= 2.
# Mix data
A = np.array([[1, 1], [0, 2]]) # Mixing matrix
X = np.dot(S, A.T) # Generate observations
pca = PCA()
S_pca_ = pca.fit(X).transform(X)
ica = FastICA(random_state=rng)
S_ica_ = ica.fit(X).transform(X) # Estimate the sources
S_ica_ /= S_ica_.std(axis=0)
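# Note: ICA recovers sources only up to permutation, sign and scale, hence
# the per-component standard deviation normalization above.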
###############################################################################
# Plot results
def plot_samples(S, axis_list=None):
plt.scatter(S[:, 0], S[:, 1], s=2, marker='o', zorder=10,
color='steelblue', alpha=0.5)
if axis_list is not None:
colors = ['orange', 'red']
for color, axis in zip(colors, axis_list):
axis /= axis.std()
x_axis, y_axis = axis
# Trick to get legend to work
plt.plot(0.1 * x_axis, 0.1 * y_axis, linewidth=2, color=color)
plt.quiver(0, 0, x_axis, y_axis, zorder=11, width=0.01, scale=6,
color=color)
plt.hlines(0, -3, 3)
plt.vlines(0, -3, 3)
plt.xlim(-3, 3)
plt.ylim(-3, 3)
plt.xlabel('x')
plt.ylabel('y')
plt.figure()
plt.subplot(2, 2, 1)
plot_samples(S / S.std())
plt.title('True Independent Sources')
axis_list = [pca.components_.T, ica.mixing_]
plt.subplot(2, 2, 2)
plot_samples(X / np.std(X), axis_list=axis_list)
legend = plt.legend(['PCA', 'ICA'], loc='upper right')
legend.set_zorder(100)
plt.title('Observations')
plt.subplot(2, 2, 3)
plot_samples(S_pca_ / np.std(S_pca_, axis=0))
plt.title('PCA recovered signals')
plt.subplot(2, 2, 4)
plot_samples(S_ica_ / np.std(S_ica_))
plt.title('ICA recovered signals')
plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.36)
plt.show()
| bsd-3-clause |
rcarrillocruz/ansible | lib/ansible/module_utils/facts/other/facter.py | 232 | 2985 | # This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from ansible.module_utils.facts.namespace import PrefixFactNamespace
from ansible.module_utils.facts.collector import BaseFactCollector
class FacterFactCollector(BaseFactCollector):
name = 'facter'
_fact_ids = set(['facter'])
def __init__(self, collectors=None, namespace=None):
namespace = PrefixFactNamespace(namespace_name='facter',
prefix='facter_')
super(FacterFactCollector, self).__init__(collectors=collectors,
namespace=namespace)
def find_facter(self, module):
facter_path = module.get_bin_path('facter', opt_dirs=['/opt/puppetlabs/bin'])
cfacter_path = module.get_bin_path('cfacter', opt_dirs=['/opt/puppetlabs/bin'])
# Prefer to use cfacter if available
if cfacter_path is not None:
facter_path = cfacter_path
return facter_path
def run_facter(self, module, facter_path):
# if facter is installed, and we can use --json because
# ruby-json is ALSO installed, include facter data in the JSON
rc, out, err = module.run_command(facter_path + " --puppet --json")
return rc, out, err
def get_facter_output(self, module):
facter_path = self.find_facter(module)
if not facter_path:
return None
rc, out, err = self.run_facter(module, facter_path)
if rc != 0:
return None
return out
def collect(self, module=None, collected_facts=None):
        # Note that this mirrors previous facter behavior, where there isn't
        # an 'ansible_facter' key in the main fact dict, but instead, 'facter_whatever'
# items are added to the main dict.
facter_dict = {}
if not module:
return facter_dict
facter_output = self.get_facter_output(module)
        # TODO: if we fail, should we add an empty facter key or nothing?
if facter_output is None:
return facter_dict
try:
facter_dict = json.loads(facter_output)
except Exception:
# FIXME: maybe raise a FactCollectorError with some info attrs?
pass
return facter_dict
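# Rough usage sketch (assumes an existing AnsibleModule instance `module`;
# collectors are normally driven by the facts machinery, so the direct call
# below is for illustration only):
#     collector = FacterFactCollector()
#     facts = collector.collect(module=module)  # raw facter dict, {} on failure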
| gpl-3.0 |
endlessm/chromium-browser | tools/site_compare/scrapers/chrome/chromebase.py | 189 | 5358 | #!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Does scraping for all currently-known versions of Chrome"""
import pywintypes
import sys
import types
from drivers import keyboard
from drivers import mouse
from drivers import windowing
# TODO: this has moved, use some logic to find it. For now,
# expects a subst k:.
DEFAULT_PATH = r"k:\chrome.exe"
def InvokeBrowser(path):
"""Invoke the Chrome browser.
Args:
path: full path to browser
Returns:
A tuple of (main window, process handle, address bar, render pane)
"""
# Reuse an existing instance of the browser if we can find one. This
# may not work correctly, especially if the window is behind other windows.
# TODO(jhaas): make this work with Vista
wnds = windowing.FindChildWindows(0, "Chrome_XPFrame")
if len(wnds):
wnd = wnds[0]
proc = None
else:
# Invoke Chrome
(proc, wnd) = windowing.InvokeAndWait(path)
# Get windows we'll need
address_bar = windowing.FindChildWindow(wnd, "Chrome_AutocompleteEdit")
render_pane = GetChromeRenderPane(wnd)
return (wnd, proc, address_bar, render_pane)
def Scrape(urls, outdir, size, pos, timeout, kwargs):
"""Invoke a browser, send it to a series of URLs, and save its output.
Args:
urls: list of URLs to scrape
outdir: directory to place output
size: size of browser window to use
pos: position of browser window
timeout: amount of time to wait for page to load
kwargs: miscellaneous keyword args
Returns:
None if success, else an error string
"""
if "path" in kwargs and kwargs["path"]: path = kwargs["path"]
else: path = DEFAULT_PATH
(wnd, proc, address_bar, render_pane) = InvokeBrowser(path)
# Resize and reposition the frame
windowing.MoveAndSizeWindow(wnd, pos, size, render_pane)
# Visit each URL we're given
if type(urls) in types.StringTypes: urls = [urls]
timedout = False
for url in urls:
# Double-click in the address bar, type the name, and press Enter
mouse.ClickInWindow(address_bar)
keyboard.TypeString(url, 0.1)
keyboard.TypeString("\n")
# Wait for the page to finish loading
load_time = windowing.WaitForThrobber(wnd, (20, 16, 36, 32), timeout)
timedout = load_time < 0
if timedout:
break
# Scrape the page
image = windowing.ScrapeWindow(render_pane)
# Save to disk
if "filename" in kwargs:
if callable(kwargs["filename"]):
filename = kwargs["filename"](url)
else:
filename = kwargs["filename"]
else:
filename = windowing.URLtoFilename(url, outdir, ".bmp")
image.save(filename)
if proc:
windowing.SetForegroundWindow(wnd)
# Send Alt-F4, then wait for process to end
keyboard.TypeString(r"{\4}", use_modifiers=True)
if not windowing.WaitForProcessExit(proc, timeout):
windowing.EndProcess(proc)
return "crashed"
if timedout:
return "timeout"
return None
def Time(urls, size, timeout, kwargs):
"""Measure how long it takes to load each of a series of URLs
Args:
urls: list of URLs to time
size: size of browser window to use
timeout: amount of time to wait for page to load
kwargs: miscellaneous keyword args
Returns:
A list of tuples (url, time). "time" can be "crashed" or "timeout"
"""
if "path" in kwargs and kwargs["path"]: path = kwargs["path"]
else: path = DEFAULT_PATH
proc = None
# Visit each URL we're given
if type(urls) in types.StringTypes: urls = [urls]
ret = []
for url in urls:
try:
# Invoke the browser if necessary
if not proc:
(wnd, proc, address_bar, render_pane) = InvokeBrowser(path)
# Resize and reposition the frame
windowing.MoveAndSizeWindow(wnd, (0,0), size, render_pane)
# Double-click in the address bar, type the name, and press Enter
mouse.ClickInWindow(address_bar)
keyboard.TypeString(url, 0.1)
keyboard.TypeString("\n")
# Wait for the page to finish loading
load_time = windowing.WaitForThrobber(wnd, (20, 16, 36, 32), timeout)
timedout = load_time < 0
if timedout:
load_time = "timeout"
# Send an alt-F4 to make the browser close; if this times out,
# we've probably got a crash
windowing.SetForegroundWindow(wnd)
keyboard.TypeString(r"{\4}", use_modifiers=True)
if not windowing.WaitForProcessExit(proc, timeout):
windowing.EndProcess(proc)
load_time = "crashed"
proc = None
except pywintypes.error:
proc = None
load_time = "crashed"
ret.append( (url, load_time) )
if proc:
windowing.SetForegroundWindow(wnd)
keyboard.TypeString(r"{\4}", use_modifiers=True)
if not windowing.WaitForProcessExit(proc, timeout):
windowing.EndProcess(proc)
return ret
def main():
# We're being invoked rather than imported, so run some tests
path = r"c:\sitecompare\scrapes\chrome\0.1.97.0"
windowing.PreparePath(path)
# Scrape three sites and save the results
Scrape([
"http://www.microsoft.com",
"http://www.google.com",
"http://www.sun.com"],
path, (1024, 768), (0, 0))
return 0
if __name__ == "__main__":
sys.exit(main())
| bsd-3-clause |
jmesmon/trifles | 2011s_lab2/gen_spice2.py | 1 | 4333 | #! /usr/bin/env python
act2_spice = """\
* Lab 1, Act 2, BJT {bjt}, Part {part}, Plot Num {qn}
Vs Vi1 0 {wave}
Vcc Vi2 0 DC 5
RC1 Vi2 Vo1 {rc}
RB1 Vi1 Vb {rb}
Q1 Vo1 Vb 0 {bjt}
{extra}
* Model for 2N3904 NPN BJT (from Eval library in Pspice)
.model 2N3904 NPN(Is=6.734f Xti=3 Eg=1.11 Vaf=74.03 Bf=416.4 Ne=1.259
+ Ise=6.734f Ikf=66.78m Xtb=1.5 Br=.7371 Nc=2 Isc=0 Ikr=0 Rc=1
+ Cjc=3.638p Mjc=.3085 Vjc=.75 Fc=.5 Cje=4.493p Mje=.2593 Vje=.75
+ Tr=239.5n Tf=301.2p Itf=.4 Vtf=4 Xtf=2 Rb=10)
.MODEL tip31 npn
+IS=1e-09 BF=3656.16 NF=1.23899 VAF=10
+IKF=0.0333653 ISE=1e-08 NE=2.29374 BR=0.1
+NR=1.5 VAR=100 IKR=0.333653 ISC=1e-08
+NC=1.75728 RB=6.15083 IRB=100 RBM=0.00113049
+RE=0.0001 RC=0.0491489 XTB=50 XTI=1
+EG=1.05 CJE=3.26475e-10 VJE=0.446174 MJE=0.464221
+TF=2.06218e-09 XTF=15.0842 VTF=25.7317 ITF=0.001
+CJC=3.07593e-10 VJC=0.775484 MJC=0.476498 XCJC=0.750493
+FC=0.796407 CJS=0 VJS=0.75 MJS=0.5
+TR=9.57121e-06 PTF=0 KF=0 AF=1
.control
{action}
hardcopy {fname}.eps {plot}
.endc
.end
"""
def set_freq(defs):
freq = defs['freq']
period = 1.0 / freq
defs['period'] = period
defs['pw'] = period / 2
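# e.g. freq=10e3 gives period = 1/10e3 = 1e-4 s and pw = 5e-5 s, i.e. a 50%
# duty-cycle square wave for the PULSE() sources below.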
def a2():
defs = {
'qn': 0,
}
models = [ {'bjt': 'tip31'},
{'bjt': '2n3904'} ]
parts = [ {
'part': 1,
'action': 'dc Vs 0 5 0.2',
'extra': '',
'plot': 'V(Vo1) V(Vi1)',
'rc': 470,
'rb': 2000,
'wave': 'DC 0'
}, {
'part':2,
'action': 'tran {ts} {all_time}',
'extra': '',
'plot': 'V(Vo1) V(Vi1)',
'wave': 'PULSE( {Vil}, {Vih}, 0, {ts}, {ts}, {pw}, {period} )',
'ts': '10NS',
'freq': 10e3,
'_over' : [
{ 'rc': 1e3, 'rb': 10e3, 'Vil':0, 'Vih':5 },
{ 'rc': .1e3,'rb': 1e3, 'Vil':0, 'Vih':5 },
{ 'rc': .1e3,'rb': 1e3, 'Vil':-5, 'Vih':5}
]
}, {
'part':3,
'rb':2000,
'rc':470,
'extra': """\
* attach shotkey diode between B and C
D1 Vb Vo1 SR102
""",
'_over': [
# p1
{ 'freq': 10e3,
'wave': 'PULSE( 0, 5, 0, {ts}, {ts}, {pw}, {period} )',
'ts':'10NS',
'action': 'tran {ts} {all_time}',
'plot': 'V(Vo1) V(Vi1)',
}, { # p2
'freq': 10e3,
'wave': 'PULSE( -5, 5, 0, {ts}, {ts}, {pw}, {period} )',
'ts':'2NS',
'action': 'tran {ts} {all_time}',
'plot': 'V(Vo1) V(Vi1)'
}, { # p3
            # Oh god, I need current measurements.
'wave': 'DC 5',
'plot': 'I(Vs)',
'action': 'tran 2NS 4NS'
}
]
}, {
'part': 4,
'rb': 2000,
'rc': 470,
'extra': """\
* attach a cap across Vi1 and Vb
C1 Vi1 Vb 1000pF
""",
'wave': 'PULSE( -5 , 5, 0, {ts}, {ts}, {pw}, {period} )',
'action': 'tran {ts} {all_time}',
'plot': 'V(Vo1) V(Vi1)',
'ts': '10NS',
'freq': 10e3
} ]
for model in models:
m_defs = dict(defs.items() + model.items())
for part in parts:
p_defs = dict(m_defs.items() + part.items())
defs['qn'] = p_defs['qn'] = m_defs['qn'] = proc(p_defs)
def proc(defs):
if '_over' in defs:
cdefs = dict(defs.items())
del cdefs['_over']
qn = defs['qn']
for inner in defs['_over']:
n_defs = dict(cdefs.items() + inner.items())
qn = cdefs['qn'] = proc(n_defs)
return qn
else:
defs['qn'] = defs['qn'] + 1
fname = '{bjt}_{part}_{qn:02}'.format(**defs)
defs['fname'] = fname
try:
set_freq(defs)
defs['all_time'] = defs['period'] * 2
        except KeyError:
            # parts without a 'freq' entry simply skip the waveform timing setup
            pass
defs['action'] = defs['action'].format(**defs)
defs['wave'] = defs['wave'].format(**defs)
f = open(fname + '.spice.gen', 'w')
f.write(act2_spice.format(**defs))
return defs['qn']
if __name__ == "__main__":
a2()
| gpl-3.0 |
ayushin78/coala | coalib/bears/GlobalBear.py | 10 | 1175 | from coalib.bears.Bear import Bear
from coalib.bears.BEAR_KIND import BEAR_KIND
class GlobalBear(Bear):
"""
    A GlobalBear is able to analyze semantic facts across several files.
    The results of a GlobalBear will be presented grouped by the origin Bear.
    Therefore results spanning multiple files are allowed and will be
    handled correctly.
    If you only look at one file at a time anyway, a LocalBear is a better
    fit for your needs. (And better for performance and usability, for both
    user and developer.)
"""
def __init__(self,
file_dict, # filename : file contents
section,
message_queue,
timeout=0):
Bear.__init__(self, section, message_queue, timeout)
self.file_dict = file_dict
@staticmethod
def kind():
return BEAR_KIND.GLOBAL
def run(self,
*args,
dependency_results=None,
**kwargs):
"""
Handles all files in file_dict.
        :return: A list of Result objects.
"""
raise NotImplementedError(
'This function has to be implemented for a runnable bear.')
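# A minimal illustrative subclass (hypothetical, not part of coala; assumes
# the debug() helper inherited from Bear):
#
#     class LineCountBear(GlobalBear):
#         def run(self):
#             total = sum(len(lines) for lines in self.file_dict.values())
#             self.debug('Analyzed {} lines across {} files'.format(
#                 total, len(self.file_dict)))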
| agpl-3.0 |
ajose01/rethinkdb | external/v8_3.30.33.16/build/gyp/test/win/gyptest-cl-optimizations.py | 247 | 3416 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Make sure optimization settings are extracted properly.
"""
import TestGyp
import sys
if sys.platform == 'win32':
test = TestGyp.TestGyp(formats=['ninja'])
CHDIR = 'compiler-flags'
test.run_gyp('optimizations.gyp', chdir=CHDIR)
# It's hard to map flags to output contents in a non-fragile way (especially
# handling both 2008/2010), so just verify the correct ninja command line
# contents.
ninja_file = test.built_file_path('obj/test_opt_off.ninja', chdir=CHDIR)
test.must_contain(ninja_file, 'cflags = /Od')
ninja_file = test.built_file_path('obj/test_opt_lev_size.ninja', chdir=CHDIR)
test.must_contain(ninja_file, 'cflags = /O1')
ninja_file = test.built_file_path('obj/test_opt_lev_speed.ninja', chdir=CHDIR)
test.must_contain(ninja_file, 'cflags = /O2')
ninja_file = test.built_file_path('obj/test_opt_lev_max.ninja', chdir=CHDIR)
test.must_contain(ninja_file, 'cflags = /Ox')
ninja_file = test.built_file_path('obj/test_opt_unset.ninja', chdir=CHDIR)
test.must_not_contain(ninja_file, '/Od')
test.must_not_contain(ninja_file, '/O1')
test.must_not_contain(ninja_file, '/Ox')
# Set by default if none specified.
test.must_contain(ninja_file, '/O2')
ninja_file = test.built_file_path('obj/test_opt_fpo.ninja', chdir=CHDIR)
test.must_contain(ninja_file, '/Oy')
test.must_not_contain(ninja_file, '/Oy-')
ninja_file = test.built_file_path('obj/test_opt_fpo_off.ninja', chdir=CHDIR)
test.must_contain(ninja_file, '/Oy-')
ninja_file = test.built_file_path('obj/test_opt_intrinsic.ninja',
chdir=CHDIR)
test.must_contain(ninja_file, '/Oi')
test.must_not_contain(ninja_file, '/Oi-')
ninja_file = test.built_file_path('obj/test_opt_intrinsic_off.ninja',
chdir=CHDIR)
test.must_contain(ninja_file, '/Oi-')
ninja_file = test.built_file_path('obj/test_opt_inline_off.ninja',
chdir=CHDIR)
test.must_contain(ninja_file, '/Ob0')
ninja_file = test.built_file_path('obj/test_opt_inline_manual.ninja',
chdir=CHDIR)
test.must_contain(ninja_file, '/Ob1')
ninja_file = test.built_file_path('obj/test_opt_inline_auto.ninja',
chdir=CHDIR)
test.must_contain(ninja_file, '/Ob2')
ninja_file = test.built_file_path('obj/test_opt_neither.ninja',
chdir=CHDIR)
test.must_not_contain(ninja_file, '/Os')
test.must_not_contain(ninja_file, '/Ot')
ninja_file = test.built_file_path('obj/test_opt_size.ninja',
chdir=CHDIR)
test.must_contain(ninja_file, '/Os')
ninja_file = test.built_file_path('obj/test_opt_speed.ninja',
chdir=CHDIR)
test.must_contain(ninja_file, '/Ot')
ninja_file = test.built_file_path('obj/test_opt_wpo.ninja',
chdir=CHDIR)
test.must_contain(ninja_file, '/GL')
ninja_file = test.built_file_path('obj/test_opt_sp.ninja',
chdir=CHDIR)
test.must_contain(ninja_file, '/GF')
ninja_file = test.built_file_path('obj/test_opt_sp_off.ninja',
chdir=CHDIR)
test.must_not_contain(ninja_file, '/GF')
ninja_file = test.built_file_path('obj/test_opt_fso.ninja',
chdir=CHDIR)
test.must_contain(ninja_file, '/GT')
ninja_file = test.built_file_path('obj/test_opt_fso_off.ninja',
chdir=CHDIR)
test.must_not_contain(ninja_file, '/GT')
test.pass_test()
| agpl-3.0 |
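For reference, the VCCLCompilerTool 'Optimization' values this test exercises map to cl.exe flags as sketched below; the mapping is inferred from the target names above, and '2' is also the ninja default when the setting is unset:
OPTIMIZATION_FLAGS = {
    '0': '/Od',  # test_opt_off: optimization disabled
    '1': '/O1',  # test_opt_lev_size: minimize size
    '2': '/O2',  # test_opt_lev_speed: maximize speed (default when unset)
    '3': '/Ox',  # test_opt_lev_max: full optimization
}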
ibinti/intellij-community | plugins/hg4idea/testData/bin/mercurial/osutil.py | 90 | 5363 | # osutil.py - pure Python version of osutil.c
#
# Copyright 2009 Matt Mackall <[email protected]> and others
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
import os
import stat as statmod
def _mode_to_kind(mode):
if statmod.S_ISREG(mode):
return statmod.S_IFREG
if statmod.S_ISDIR(mode):
return statmod.S_IFDIR
if statmod.S_ISLNK(mode):
return statmod.S_IFLNK
if statmod.S_ISBLK(mode):
return statmod.S_IFBLK
if statmod.S_ISCHR(mode):
return statmod.S_IFCHR
if statmod.S_ISFIFO(mode):
return statmod.S_IFIFO
if statmod.S_ISSOCK(mode):
return statmod.S_IFSOCK
return mode
def listdir(path, stat=False, skip=None):
'''listdir(path, stat=False) -> list_of_tuples
Return a sorted list containing information about the entries
in the directory.
If stat is True, each element is a 3-tuple:
(name, type, stat object)
Otherwise, each element is a 2-tuple:
(name, type)
'''
result = []
prefix = path
if not prefix.endswith(os.sep):
prefix += os.sep
names = os.listdir(path)
names.sort()
for fn in names:
st = os.lstat(prefix + fn)
if fn == skip and statmod.S_ISDIR(st.st_mode):
return []
if stat:
result.append((fn, _mode_to_kind(st.st_mode), st))
else:
result.append((fn, _mode_to_kind(st.st_mode)))
return result
if os.name != 'nt':
posixfile = open
else:
import ctypes, msvcrt
_kernel32 = ctypes.windll.kernel32
_DWORD = ctypes.c_ulong
_LPCSTR = _LPSTR = ctypes.c_char_p
_HANDLE = ctypes.c_void_p
_INVALID_HANDLE_VALUE = _HANDLE(-1).value
# CreateFile
_FILE_SHARE_READ = 0x00000001
_FILE_SHARE_WRITE = 0x00000002
_FILE_SHARE_DELETE = 0x00000004
_CREATE_ALWAYS = 2
_OPEN_EXISTING = 3
_OPEN_ALWAYS = 4
_GENERIC_READ = 0x80000000
_GENERIC_WRITE = 0x40000000
_FILE_ATTRIBUTE_NORMAL = 0x80
# open_osfhandle flags
_O_RDONLY = 0x0000
_O_RDWR = 0x0002
_O_APPEND = 0x0008
_O_TEXT = 0x4000
_O_BINARY = 0x8000
# types of parameters of C functions used (required by pypy)
_kernel32.CreateFileA.argtypes = [_LPCSTR, _DWORD, _DWORD, ctypes.c_void_p,
_DWORD, _DWORD, _HANDLE]
_kernel32.CreateFileA.restype = _HANDLE
def _raiseioerror(name):
err = ctypes.WinError()
raise IOError(err.errno, '%s: %s' % (name, err.strerror))
class posixfile(object):
'''a file object aiming for POSIX-like semantics
CPython's open() returns a file that was opened *without* setting the
_FILE_SHARE_DELETE flag, which causes rename and unlink to abort.
This even happens if any hardlinked copy of the file is in open state.
We set _FILE_SHARE_DELETE here, so files opened with posixfile can be
renamed and deleted while they are held open.
Note that if a file opened with posixfile is unlinked, the file
remains but cannot be opened again or be recreated under the same name,
until all reading processes have closed the file.'''
def __init__(self, name, mode='r', bufsize=-1):
if 'b' in mode:
flags = _O_BINARY
else:
flags = _O_TEXT
m0 = mode[0]
if m0 == 'r' and '+' not in mode:
flags |= _O_RDONLY
access = _GENERIC_READ
else:
# work around http://support.microsoft.com/kb/899149 and
# set _O_RDWR for 'w' and 'a', even if mode has no '+'
flags |= _O_RDWR
access = _GENERIC_READ | _GENERIC_WRITE
if m0 == 'r':
creation = _OPEN_EXISTING
elif m0 == 'w':
creation = _CREATE_ALWAYS
elif m0 == 'a':
creation = _OPEN_ALWAYS
flags |= _O_APPEND
else:
raise ValueError("invalid mode: %s" % mode)
fh = _kernel32.CreateFileA(name, access,
_FILE_SHARE_READ | _FILE_SHARE_WRITE | _FILE_SHARE_DELETE,
None, creation, _FILE_ATTRIBUTE_NORMAL, None)
if fh == _INVALID_HANDLE_VALUE:
_raiseioerror(name)
fd = msvcrt.open_osfhandle(fh, flags)
if fd == -1:
_kernel32.CloseHandle(fh)
_raiseioerror(name)
f = os.fdopen(fd, mode, bufsize)
# unfortunately, f.name is '<fdopen>' at this point -- so we store
# the name on this wrapper. We cannot just assign to f.name,
# because that attribute is read-only.
object.__setattr__(self, 'name', name)
object.__setattr__(self, '_file', f)
def __iter__(self):
return self._file
def __getattr__(self, name):
return getattr(self._file, name)
def __setattr__(self, name, value):
'''mimics the read-only attributes of Python file objects
by raising 'TypeError: readonly attribute' if someone tries:
f = posixfile('foo.txt')
f.name = 'bla' '''
return self._file.__setattr__(name, value)
| apache-2.0 |
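A hedged usage sketch for the pure-Python listdir() above; the osutil import path is an assumption and the output is illustrative:
import stat

from osutil import listdir  # hypothetical import path

KIND_NAMES = {stat.S_IFDIR: 'dir', stat.S_IFREG: 'file', stat.S_IFLNK: 'link'}

for name, kind in listdir('.'):
    print('%-5s %s' % (KIND_NAMES.get(kind, 'other'), name))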
mcepl/rope | rope/contrib/generate.py | 3 | 14402 | import rope.base.evaluate
from rope.base import libutils
from rope.base import (change, pyobjects, exceptions, pynames, worder,
codeanalyze)
from rope.refactor import sourceutils, importutils, functionutils, suites
def create_generate(kind, project, resource, offset, goal_resource=None):
"""A factory for creating `Generate` objects
`kind` can be 'variable', 'function', 'class', 'module' or
'package'.
"""
generate = eval('Generate' + kind.title())
return generate(project, resource, offset, goal_resource=goal_resource)
def create_module(project, name, sourcefolder=None):
"""Creates a module and returns a `rope.base.resources.File`"""
if sourcefolder is None:
sourcefolder = project.root
packages = name.split('.')
parent = sourcefolder
for package in packages[:-1]:
parent = parent.get_child(package)
return parent.create_file(packages[-1] + '.py')
def create_package(project, name, sourcefolder=None):
"""Creates a package and returns a `rope.base.resources.Folder`"""
if sourcefolder is None:
sourcefolder = project.root
packages = name.split('.')
parent = sourcefolder
for package in packages[:-1]:
parent = parent.get_child(package)
made_packages = parent.create_folder(packages[-1])
made_packages.create_file('__init__.py')
return made_packages
class _Generate(object):
def __init__(self, project, resource, offset, goal_resource=None):
self.project = project
self.resource = resource
self.goal_resource = goal_resource
self.info = self._generate_info(project, resource, offset)
self.name = self.info.get_name()
self._check_exceptional_conditions()
def _generate_info(self, project, resource, offset):
return _GenerationInfo(project.pycore, resource, offset, self.goal_resource)
def _check_exceptional_conditions(self):
if self.info.element_already_exists():
raise exceptions.RefactoringError(
'Element <%s> already exists.' % self.name)
if not self.info.primary_is_found():
raise exceptions.RefactoringError(
'Cannot determine the scope <%s> should be defined in.' %
self.name)
def get_changes(self):
changes = change.ChangeSet('Generate %s <%s>' %
(self._get_element_kind(), self.name))
indents = self.info.get_scope_indents()
blanks = self.info.get_blank_lines()
base_definition = sourceutils.fix_indentation(self._get_element(),
indents)
definition = '\n' * blanks[0] + base_definition + '\n' * blanks[1]
resource = self.info.get_insertion_resource()
start, end = self.info.get_insertion_offsets()
collector = codeanalyze.ChangeCollector(resource.read())
collector.add_change(start, end, definition)
changes.add_change(change.ChangeContents(
resource, collector.get_changed()))
if self.goal_resource:
relative_import = _add_relative_import_to_module(self.project, self.resource, self.goal_resource, self.name)
changes.add_change(relative_import)
return changes
def get_location(self):
return (self.info.get_insertion_resource(),
self.info.get_insertion_lineno())
def _get_element_kind(self):
raise NotImplementedError()
def _get_element(self):
raise NotImplementedError()
class GenerateFunction(_Generate):
def _generate_info(self, project, resource, offset):
return _FunctionGenerationInfo(project.pycore, resource, offset)
def _get_element(self):
decorator = ''
args = []
if self.info.is_static_method():
decorator = '@staticmethod\n'
if self.info.is_method() or self.info.is_constructor() or \
self.info.is_instance():
args.append('self')
args.extend(self.info.get_passed_args())
definition = '%sdef %s(%s):\n pass\n' % (decorator, self.name,
', '.join(args))
return definition
def _get_element_kind(self):
return 'Function'
class GenerateVariable(_Generate):
def _get_element(self):
return '%s = None\n' % self.name
def _get_element_kind(self):
return 'Variable'
class GenerateClass(_Generate):
def _get_element(self):
return 'class %s(object):\n pass\n' % self.name
def _get_element_kind(self):
return 'Class'
class GenerateModule(_Generate):
def get_changes(self):
package = self.info.get_package()
changes = change.ChangeSet('Generate Module <%s>' % self.name)
new_resource = self.project.get_file('%s/%s.py' %
(package.path, self.name))
if new_resource.exists():
raise exceptions.RefactoringError(
'Module <%s> already exists' % new_resource.path)
changes.add_change(change.CreateResource(new_resource))
changes.add_change(_add_import_to_module(
self.project, self.resource, new_resource))
return changes
def get_location(self):
package = self.info.get_package()
return (package.get_child('%s.py' % self.name), 1)
class GeneratePackage(_Generate):
def get_changes(self):
package = self.info.get_package()
changes = change.ChangeSet('Generate Package <%s>' % self.name)
new_resource = self.project.get_folder('%s/%s' %
(package.path, self.name))
if new_resource.exists():
raise exceptions.RefactoringError(
'Package <%s> already exists' % new_resource.path)
changes.add_change(change.CreateResource(new_resource))
changes.add_change(_add_import_to_module(
self.project, self.resource, new_resource))
child = self.project.get_folder(package.path + '/' + self.name)
changes.add_change(change.CreateFile(child, '__init__.py'))
return changes
def get_location(self):
package = self.info.get_package()
child = package.get_child(self.name)
return (child.get_child('__init__.py'), 1)
def _add_import_to_module(project, resource, imported):
pymodule = project.get_pymodule(resource)
import_tools = importutils.ImportTools(project)
module_imports = import_tools.module_imports(pymodule)
module_name = libutils.modname(imported)
new_import = importutils.NormalImport(((module_name, None), ))
module_imports.add_import(new_import)
return change.ChangeContents(resource, module_imports.get_changed_source())
def _add_relative_import_to_module(project, resource, imported, name):
pymodule = project.get_pymodule(resource)
import_tools = importutils.ImportTools(project)
module_imports = import_tools.module_imports(pymodule)
new_import = import_tools.get_from_import(imported, name)
module_imports.add_import(new_import)
return change.ChangeContents(resource, module_imports.get_changed_source())
class _GenerationInfo(object):
def __init__(self, pycore, resource, offset, goal_resource=None):
self.pycore = pycore
self.resource = resource
self.offset = offset
self.goal_resource = goal_resource
self.source_pymodule = self.pycore.project.get_pymodule(resource)
finder = rope.base.evaluate.ScopeNameFinder(self.source_pymodule)
self.primary, self.pyname = finder.get_primary_and_pyname_at(offset)
self._init_fields()
def _init_fields(self):
self.source_scope = self._get_source_scope()
self.goal_scope = self._get_goal_scope()
self.goal_pymodule = self._get_goal_module(self.goal_scope)
def _get_goal_scope(self):
if self.primary is None:
if self.goal_resource:
return self.pycore.project.get_pymodule(self.goal_resource).get_scope()
else:
return self._get_source_scope()
pyobject = self.primary.get_object()
if isinstance(pyobject, pyobjects.PyDefinedObject):
return pyobject.get_scope()
elif isinstance(pyobject.get_type(), pyobjects.PyClass):
return pyobject.get_type().get_scope()
def _get_goal_module(self, scope):
if scope is None:
return
while scope.parent is not None:
scope = scope.parent
return scope.pyobject
def _get_source_scope(self):
module_scope = self.source_pymodule.get_scope()
lineno = self.source_pymodule.lines.get_line_number(self.offset)
return module_scope.get_inner_scope_for_line(lineno)
def get_insertion_lineno(self):
lines = self.goal_pymodule.lines
if self.goal_scope == self.source_scope:
line_finder = self.goal_pymodule.logical_lines
lineno = lines.get_line_number(self.offset)
lineno = line_finder.logical_line_in(lineno)[0]
root = suites.ast_suite_tree(self.goal_scope.pyobject.get_ast())
suite = root.find_suite(lineno)
indents = sourceutils.get_indents(lines, lineno)
while self.get_scope_indents() < indents:
lineno = suite.get_start()
indents = sourceutils.get_indents(lines, lineno)
suite = suite.parent
return lineno
else:
return min(self.goal_scope.get_end() + 1, lines.length())
def get_insertion_resource(self):
return self.goal_pymodule.get_resource()
def get_insertion_offsets(self):
if self.goal_scope.get_kind() == 'Class':
start, end = sourceutils.get_body_region(self.goal_scope.pyobject)
if self.goal_pymodule.source_code[start:end].strip() == 'pass':
return start, end
lines = self.goal_pymodule.lines
start = lines.get_line_start(self.get_insertion_lineno())
return (start, start)
def get_scope_indents(self):
if self.goal_scope.get_kind() == 'Module':
return 0
return sourceutils.get_indents(self.goal_pymodule.lines,
self.goal_scope.get_start()) + 4
def get_blank_lines(self):
if self.goal_scope.get_kind() == 'Module':
base_blanks = 2
if self.goal_pymodule.source_code.strip() == '':
base_blanks = 0
if self.goal_scope.get_kind() == 'Class':
base_blanks = 1
if self.goal_scope.get_kind() == 'Function':
base_blanks = 0
if self.goal_scope == self.source_scope:
return (0, base_blanks)
return (base_blanks, 0)
def get_package(self):
primary = self.primary
if self.primary is None:
return self.pycore.project.get_source_folders()[0]
if isinstance(primary.get_object(), pyobjects.PyPackage):
return primary.get_object().get_resource()
raise exceptions.RefactoringError(
'A module/package can be only created in a package.')
def primary_is_found(self):
return self.goal_scope is not None
def element_already_exists(self):
if self.pyname is None or isinstance(self.pyname, pynames.UnboundName):
return False
return self.get_name() in self.goal_scope.get_defined_names()
def get_name(self):
return worder.get_name_at(self.resource, self.offset)
class _FunctionGenerationInfo(_GenerationInfo):
def _get_goal_scope(self):
if self.is_constructor():
return self.pyname.get_object().get_scope()
if self.is_instance():
return self.pyname.get_object().get_type().get_scope()
if self.primary is None:
return self._get_source_scope()
pyobject = self.primary.get_object()
if isinstance(pyobject, pyobjects.PyDefinedObject):
return pyobject.get_scope()
elif isinstance(pyobject.get_type(), pyobjects.PyClass):
return pyobject.get_type().get_scope()
def element_already_exists(self):
if self.pyname is None or isinstance(self.pyname, pynames.UnboundName):
return False
return self.get_name() in self.goal_scope.get_defined_names()
def is_static_method(self):
return self.primary is not None and \
isinstance(self.primary.get_object(), pyobjects.PyClass)
def is_method(self):
return self.primary is not None and \
isinstance(self.primary.get_object().get_type(), pyobjects.PyClass)
def is_constructor(self):
return self.pyname is not None and \
isinstance(self.pyname.get_object(), pyobjects.PyClass)
def is_instance(self):
if self.pyname is None:
return False
pyobject = self.pyname.get_object()
return isinstance(pyobject.get_type(), pyobjects.PyClass)
def get_name(self):
if self.is_constructor():
return '__init__'
if self.is_instance():
return '__call__'
return worder.get_name_at(self.resource, self.offset)
def get_passed_args(self):
result = []
source = self.source_pymodule.source_code
finder = worder.Worder(source)
if finder.is_a_function_being_called(self.offset):
start, end = finder.get_primary_range(self.offset)
parens_start, parens_end = finder.get_word_parens_range(end - 1)
call = source[start:parens_end]
parser = functionutils._FunctionParser(call, False)
args, keywords = parser.get_parameters()
for arg in args:
if self._is_id(arg):
result.append(arg)
else:
result.append('arg%d' % len(result))
for name, value in keywords:
result.append(name)
return result
def _is_id(self, arg):
def id_or_underline(c):
return c.isalpha() or c == '_'
for c in arg:
if not id_or_underline(c) and not c.isdigit():
return False
return id_or_underline(arg[0])
| lgpl-3.0 |
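A hedged usage sketch for create_module/create_package above; the project path is a placeholder:
from rope.base.project import Project
from rope.contrib.generate import create_module, create_package

project = Project('/tmp/demo_project')       # placeholder path
create_package(project, 'demo')              # creates demo/__init__.py
create_module(project, 'demo.util')          # creates demo/util.py
project.close()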
sigma-random/androguard | androguard/gui/treewindow.py | 8 | 4341 | from PySide import QtCore, QtGui
from androguard.core import androconf
from androguard.gui.xrefwindow import XrefDialog
from androguard.gui.sourcewindow import SourceWindow
from androguard.gui.helpers import classdot2class, Signature
class TreeWindow(QtGui.QTreeWidget):
'''Tree widget listing the classes of the loaded file, grouped by
their package path.'''
def __init__(self, parent=None, win=None):
super(TreeWindow, self).__init__(parent)
self.itemDoubleClicked.connect(self.itemDoubleClickedHandler)
self.mainwin = win
self.createActions()
self.header().close()
def fill(self, classes):
'''Parse all the paths (['Lcom/sogeti/example/myclass/MyActivity$1;', ...])
and build a tree using the QTreeWidgetItem insertion method.'''
root_path_node = ({}, self)
for c in sorted(classes, key=lambda c: c.name):
sig = Signature(c)
path_node = root_path_node
# Namespaces
for path in sig.class_path:
if path not in path_node[0]:
path_node[0][path] = ({},
QtGui.QTreeWidgetItem(path_node[1]))
path_node[0][path][1].setText(0, path)
path_node = path_node[0][path]
# Class
path_node[0][path] = ({},
QtGui.QTreeWidgetItem(path_node[1]))
path_node[0][path][1].setText(0, sig.class_name)
def item2path(self, item, column=0):
'''Browse all parents from QTreeWidgetItem item
in order to rebuild the complete path
Return both complete path (ex: "Landroid/support/AccessibilityServiceInfoCompat$1;")
and path_elts (ex: [u'Landroid', u'support', u'AccessibilityServiceInfoCompat$1;'])
'''
path_elts = []
while item is not None:
# print item.text(column)
path_elts.append(item.text(column))
item = item.parent()
path_elts.reverse()
path = ".".join(path_elts)
path = classdot2class(path)
return path, path_elts
def itemDoubleClickedHandler(self, item, column):
'''Signal sent by PySide when a tree element is clicked'''
# print "item %s has been double clicked at column %s" % (str(item), str(column))
path, path_elts = self.item2path(item)
if item.childCount() != 0:
self.mainwin.showStatus("Sources not available. %s is not a class" % path)
return
self.mainwin.openSourceWindow(path)
def createActions(self):
self.xrefAct = QtGui.QAction("Xref from...", self,
# shortcut=QtGui.QKeySequence("CTRL+B"),
statusTip="List the references where this element is used",
triggered=self.actionXref)
self.expandAct = QtGui.QAction("Expand...", self,
statusTip="Expand all the subtrees",
triggered=self.actionExpand)
self.collapseAct = QtGui.QAction("Collapse...", self,
statusTip="Collapse all the subtrees",
triggered=self.actionCollapse)
def actionXref(self):
item = self.currentItem()
path, path_elts = self.item2path(item)
if item.childCount() != 0:
self.mainwin.showStatus("Xref not available. %s is not a class" % path)
return
xrefs_list = XrefDialog.get_xrefs_list(self.mainwin.d, path=path)
if not xrefs_list:
self.mainwin.showStatus("No xref returned.")
return
xwin = XrefDialog(parent=self.mainwin, win=self.mainwin, xrefs_list=xrefs_list, path=path)
xwin.show()
def expand_children(self, item):
self.expandItem(item)
for i in range(item.childCount()):
self.expand_children(item.child(i))
def actionExpand(self):
self.expand_children(self.currentItem())
def collapse_children(self, item):
for i in range(item.childCount()):
self.collapse_children(item.child(i))
self.collapseItem(item)
def actionCollapse(self):
self.collapse_children(self.currentItem())
def contextMenuEvent(self, event):
menu = QtGui.QMenu(self)
menu.addAction(self.xrefAct)
menu.addAction(self.expandAct)
menu.addAction(self.collapseAct)
menu.exec_(event.globalPos())
| apache-2.0 |
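TreeWindow.fill() threads a (children_dict, QTreeWidgetItem) pair through each path segment; the same nesting logic, stripped of Qt, looks like this sketch:
def build_tree(dotted_paths):
    root = {}
    for dotted in sorted(dotted_paths):
        node = root
        for part in dotted.split('.'):
            # setdefault plays the role of the (dict, item) bookkeeping above.
            node = node.setdefault(part, {})
    return root

# build_tree(['com.example.A', 'com.example.B'])
# -> {'com': {'example': {'A': {}, 'B': {}}}}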
bardes/sonitus | tools/tone_gen.py | 1 | 1271 | #!/usr/bin/env python
from sys import argv, stderr
usage = \
"""
Usage: {program} <sample rate> <A4 freq.> [octaves=8]
e.g.: {program} 64000 442.0 5
""".format(program=argv[0])
if len(argv) < 3 or len(argv) > 4 :
print(usage, file = stderr)
exit(1)
A4 = 0
sample_rate = 0
octaves = 8
try:
A4 = float(argv[2])
except:
print("Error, invalid argument: Freq. must be a number!", file = stderr)
print(usage, file = stderr)
exit(1)
try:
sample_rate = int(argv[1])
except:
print("Error, invalid argument: Sample rate must be an integer!", \
file = stderr)
print(usage, file = stderr)
exit(1)
if len(argv) == 4 :
try:
octaves = int(argv[3])
except:
print("Error, invalid argument: Octaves must be an integer!", \
file = stderr)
print(usage, file = stderr)
exit(1)
freq_ratio = 2**(1/12)
base_freq = A4/(freq_ratio**57)
periods = [round(sample_rate/(2*base_freq*freq_ratio**t)) \
for t in range(0, 12*octaves)]
print("uint16_t tone_periods[{ntones}] = {{".format(ntones=12*octaves))
for o in range(0, octaves):
print('\t', end='')
for i in range(0, 12):
print("{period}, ".format(period=periods[12*o+i]), end='')
print('')
print("};")
| mit |
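A quick check of the equal-temperament math above: the table starts at C0 (57 semitones below A4), and each stored value is a half-period in samples, sample_rate / (2 * f). For a 64 kHz rate and A4 = 440 Hz:
sample_rate, A4 = 64000, 440.0
ratio = 2 ** (1 / 12)
base = A4 / ratio ** 57                  # C0, about 16.35 Hz
a4_period = round(sample_rate / (2 * base * ratio ** 57))
print(a4_period)                         # -> 73 samples per half-cycle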
yewang15215/django | tests/template_tests/filter_tests/test_phone2numeric.py | 176 | 1500 | from django.template.defaultfilters import phone2numeric_filter
from django.test import SimpleTestCase
from django.utils.safestring import mark_safe
from ..utils import setup
class Phone2numericTests(SimpleTestCase):
@setup({'phone2numeric01': '{{ a|phone2numeric }} {{ b|phone2numeric }}'})
def test_phone2numeric01(self):
output = self.engine.render_to_string(
'phone2numeric01',
{'a': '<1-800-call-me>', 'b': mark_safe('<1-800-call-me>')},
)
self.assertEqual(output, '<1-800-2255-63> <1-800-2255-63>')
@setup({'phone2numeric02': '{% autoescape off %}{{ a|phone2numeric }} {{ b|phone2numeric }}{% endautoescape %}'})
def test_phone2numeric02(self):
output = self.engine.render_to_string(
'phone2numeric02',
{'a': '<1-800-call-me>', 'b': mark_safe('<1-800-call-me>')},
)
self.assertEqual(output, '<1-800-2255-63> <1-800-2255-63>')
@setup({'phone2numeric03': '{{ a|phone2numeric }}'})
def test_phone2numeric03(self):
output = self.engine.render_to_string(
'phone2numeric03',
{'a': 'How razorback-jumping frogs can level six piqued gymnasts!'},
)
self.assertEqual(
output,
'469 729672225-5867464 37647 226 53835 749 747833 49662787!'
)
class FunctionTests(SimpleTestCase):
def test_phone2numeric(self):
self.assertEqual(phone2numeric_filter('0800 flowers'), '0800 3569377')
| bsd-3-clause |
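The expected outputs above follow the standard phone-keypad letter mapping; a hedged sketch of that mapping (Django's real filter differs in implementation details):
KEYPAD = {
    'a': '2', 'b': '2', 'c': '2', 'd': '3', 'e': '3', 'f': '3',
    'g': '4', 'h': '4', 'i': '4', 'j': '5', 'k': '5', 'l': '5',
    'm': '6', 'n': '6', 'o': '6', 'p': '7', 'q': '7', 'r': '7',
    's': '7', 't': '8', 'u': '8', 'v': '8', 'w': '9', 'x': '9',
    'y': '9', 'z': '9',
}

def phone2numeric(phone):
    return ''.join(KEYPAD.get(ch, ch) for ch in phone.lower())

assert phone2numeric('1-800-call-me') == '1-800-2255-63'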
ellipsys/discover | misc/python/ex2.py | 6 | 1050 | #!/usr/bin/env python
import os
import xml.dom.minidom
os.system('clear')
# Open XML document using minidom parser
DOMTree = xml.dom.minidom.parse('movies.xml')
collection = DOMTree.documentElement
if collection.hasAttribute('shelf'):
print '\n\nRoot element: %s\n' % collection.getAttribute('shelf')
# Get all the movies in the collection
movies = collection.getElementsByTagName('movie')
# Print details of each movie.
for movie in movies:
print '***** Movie *****'
if movie.hasAttribute('title'):
print 'Title: %s' % movie.getAttribute('title')
type = movie.getElementsByTagName('type')[0]
print 'Type: %s' % type.childNodes[0].data
format = movie.getElementsByTagName('format')[0]
print 'Format: %s' % format.childNodes[0].data
rating = movie.getElementsByTagName('rating')[0]
print 'Rating: %s' % rating.childNodes[0].data
description = movie.getElementsByTagName('description')[0]
print 'Description: %s' % description.childNodes[0].data
| bsd-3-clause |
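The same traversal with xml.etree.ElementTree, a common alternative to minidom (a sketch that assumes the same movies.xml schema; single-argument print() runs on Python 2 and 3):
import xml.etree.ElementTree as ET

root = ET.parse('movies.xml').getroot()
print('Root element: %s' % root.get('shelf'))
for movie in root.iter('movie'):
    print('Title: %s' % movie.get('title'))
    print('Type: %s' % movie.findtext('type'))
    print('Rating: %s' % movie.findtext('rating'))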
cihai/cihai | cihai/exc.py | 1 | 2127 | """Exceptions raised from the Cihai library."""
class CihaiException(Exception):
"""Base Cihai Exception class."""
class ImportStringError(ImportError, CihaiException):
"""
Provides information about a failed :func:`import_string` attempt.
Notes
-----
This is from werkzeug.utils c769200 on May 23, LICENSE BSD.
https://github.com/pallets/werkzeug
Changes:
- Deferred import of import_string from cihai.utils
- Formatted with black
"""
#: String in dotted notation that failed to be imported.
import_name = None
#: Wrapped exception.
exception = None
def __init__(self, import_name, exception):
from .utils import import_string
self.import_name = import_name
self.exception = exception
msg = (
'import_string() failed for %r. Possible reasons are:\n\n'
'- missing __init__.py in a package;\n'
'- package or module path not included in sys.path;\n'
'- duplicated package or module name taking precedence in '
'sys.path;\n'
'- missing module, class, function or variable;\n\n'
'Debugged import:\n\n%s\n\n'
'Original exception:\n\n%s: %s'
)
name = ''
tracked = []
for part in import_name.replace(':', '.').split('.'):
name += (name and '.') + part
imported = import_string(name, silent=True)
if imported:
tracked.append((name, getattr(imported, '__file__', None)))
else:
track = ['- %r found in %r.' % (n, i) for n, i in tracked]
track.append('- %r not found.' % name)
msg = msg % (
import_name,
'\n'.join(track),
exception.__class__.__name__,
str(exception),
)
break
ImportError.__init__(self, msg)
def __repr__(self):
return '<%s(%r, %r)>' % (
self.__class__.__name__,
self.import_name,
self.exception,
)
| mit |
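A hedged usage sketch, assuming cihai.utils mirrors werkzeug's import_string() behavior of raising ImportStringError (with the debugging trail built in __init__ above) on failure:
from cihai.exc import ImportStringError
from cihai.utils import import_string

try:
    import_string('cihai.does_not_exist')  # hypothetical dotted path
except ImportStringError as exc:
    print(exc.import_name)   # 'cihai.does_not_exist'
    print(exc.exception)     # the wrapped ImportError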
FireWRT/OpenWrt-Firefly-Libraries | staging_dir/host/lib/python3.4/test/test_imghdr.py | 87 | 4413 | import imghdr
import io
import os
import unittest
import warnings
from test.support import findfile, TESTFN, unlink
TEST_FILES = (
('python.png', 'png'),
('python.gif', 'gif'),
('python.bmp', 'bmp'),
('python.ppm', 'ppm'),
('python.pgm', 'pgm'),
('python.pbm', 'pbm'),
('python.jpg', 'jpeg'),
('python.ras', 'rast'),
('python.sgi', 'rgb'),
('python.tiff', 'tiff'),
('python.xbm', 'xbm')
)
class UnseekableIO(io.FileIO):
def tell(self):
raise io.UnsupportedOperation
def seek(self, *args, **kwargs):
raise io.UnsupportedOperation
class TestImghdr(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.testfile = findfile('python.png', subdir='imghdrdata')
with open(cls.testfile, 'rb') as stream:
cls.testdata = stream.read()
def tearDown(self):
unlink(TESTFN)
def test_data(self):
for filename, expected in TEST_FILES:
filename = findfile(filename, subdir='imghdrdata')
self.assertEqual(imghdr.what(filename), expected)
with open(filename, 'rb') as stream:
self.assertEqual(imghdr.what(stream), expected)
with open(filename, 'rb') as stream:
data = stream.read()
self.assertEqual(imghdr.what(None, data), expected)
self.assertEqual(imghdr.what(None, bytearray(data)), expected)
def test_register_test(self):
def test_jumbo(h, file):
if h.startswith(b'eggs'):
return 'ham'
imghdr.tests.append(test_jumbo)
self.addCleanup(imghdr.tests.pop)
self.assertEqual(imghdr.what(None, b'eggs'), 'ham')
def test_file_pos(self):
with open(TESTFN, 'wb') as stream:
stream.write(b'ababagalamaga')
pos = stream.tell()
stream.write(self.testdata)
with open(TESTFN, 'rb') as stream:
stream.seek(pos)
self.assertEqual(imghdr.what(stream), 'png')
self.assertEqual(stream.tell(), pos)
def test_bad_args(self):
with self.assertRaises(TypeError):
imghdr.what()
with self.assertRaises(AttributeError):
imghdr.what(None)
with self.assertRaises(TypeError):
imghdr.what(self.testfile, 1)
with self.assertRaises(AttributeError):
imghdr.what(os.fsencode(self.testfile))
with open(self.testfile, 'rb') as f:
with self.assertRaises(AttributeError):
imghdr.what(f.fileno())
def test_invalid_headers(self):
for header in (b'\211PN\r\n',
b'\001\331',
b'\x59\xA6',
b'cutecat',
b'000000JFI',
b'GIF80'):
self.assertIsNone(imghdr.what(None, header))
def test_string_data(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", BytesWarning)
for filename, _ in TEST_FILES:
filename = findfile(filename, subdir='imghdrdata')
with open(filename, 'rb') as stream:
data = stream.read().decode('latin1')
with self.assertRaises(TypeError):
imghdr.what(io.StringIO(data))
with self.assertRaises(TypeError):
imghdr.what(None, data)
def test_missing_file(self):
with self.assertRaises(FileNotFoundError):
imghdr.what('missing')
def test_closed_file(self):
stream = open(self.testfile, 'rb')
stream.close()
with self.assertRaises(ValueError) as cm:
imghdr.what(stream)
stream = io.BytesIO(self.testdata)
stream.close()
with self.assertRaises(ValueError) as cm:
imghdr.what(stream)
def test_unseekable(self):
with open(TESTFN, 'wb') as stream:
stream.write(self.testdata)
with UnseekableIO(TESTFN, 'rb') as stream:
with self.assertRaises(io.UnsupportedOperation):
imghdr.what(stream)
def test_output_stream(self):
with open(TESTFN, 'wb') as stream:
stream.write(self.testdata)
stream.seek(0)
with self.assertRaises(OSError) as cm:
imghdr.what(stream)
if __name__ == '__main__':
unittest.main()
| gpl-2.0 |
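A minimal usage sketch for the imghdr API exercised above (imghdr was deprecated in Python 3.11 and removed in 3.13; this targets older versions):
import imghdr

print(imghdr.what('python.png'))                 # 'png' (filename form)
print(imghdr.what(None, b'\x89PNG\r\n\x1a\n'))   # 'png' (bytes form)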
sam-tsai/django | tests/gis_tests/gis_migrations/migrations/0001_initial.py | 269 | 2465 | from django.db import connection, migrations, models
from ...models import models as gis_models
ops = [
migrations.CreateModel(
name='Neighborhood',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=100, unique=True)),
('geom', gis_models.MultiPolygonField(srid=4326)),
],
options={
'required_db_features': ['gis_enabled'],
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Household',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('neighborhood', models.ForeignKey(
'gis_migrations.Neighborhood',
models.SET_NULL,
to_field='id',
null=True,
)),
('address', models.CharField(max_length=100)),
('zip_code', models.IntegerField(null=True, blank=True)),
('geom', gis_models.PointField(srid=4326, geography=True)),
],
options={
'required_db_features': ['gis_enabled'],
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Family',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=100, unique=True)),
],
options={
},
bases=(models.Model,),
),
migrations.AddField(
model_name='household',
name='family',
field=models.ForeignKey('gis_migrations.Family', models.SET_NULL, blank=True, null=True),
preserve_default=True,
)
]
if connection.features.gis_enabled and connection.features.supports_raster:
ops += [
migrations.CreateModel(
name='Heatmap',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=100, unique=True)),
('rast', gis_models.fields.RasterField(srid=4326)),
],
options={
},
bases=(models.Model,),
),
]
class Migration(migrations.Migration):
"""
Used for gis-specific migration tests.
"""
operations = ops
| bsd-3-clause |